ZTWHHH committed on
Commit
aefd515
·
verified ·
1 Parent(s): 8c70d82

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/share/terminfo/v/v200-nam +0 -0
  2. llava_next/share/terminfo/v/vapple +0 -0
  3. llava_next/share/terminfo/v/vc203 +0 -0
  4. llava_next/share/terminfo/v/vc403a +0 -0
  5. llava_next/share/terminfo/v/vi500 +0 -0
  6. llava_next/share/terminfo/v/viewdata-rv +0 -0
  7. llava_next/share/terminfo/v/viewpoint3a+ +0 -0
  8. llava_next/share/terminfo/v/visa50 +0 -0
  9. llava_next/share/terminfo/v/vitty +0 -0
  10. llava_next/share/terminfo/v/vp60 +0 -0
  11. llava_next/share/terminfo/v/vt100+4bsd +0 -0
  12. llava_next/share/terminfo/v/vt100+keypad +0 -0
  13. llava_next/share/terminfo/v/vt100-bm-o +0 -0
  14. llava_next/share/terminfo/v/vt100-nam +0 -0
  15. llava_next/share/terminfo/v/vt100-s +0 -0
  16. llava_next/share/terminfo/v/vt100-w +0 -0
  17. llava_next/share/terminfo/v/vt100-w-nam +0 -0
  18. llava_next/share/terminfo/v/vt102-nsgr +0 -0
  19. llava_next/share/terminfo/v/vt131 +0 -0
  20. llava_next/share/terminfo/v/vt200 +0 -0
  21. llava_next/share/terminfo/v/vt200-8bit +0 -0
  22. llava_next/share/terminfo/v/vt200-w +0 -0
  23. llava_next/share/terminfo/v/vt220+pcedit +0 -0
  24. llava_next/share/terminfo/v/vt220+vtedit +0 -0
  25. llava_next/share/terminfo/v/vt220-w +0 -0
  26. llava_next/share/terminfo/v/vt300-w-nam +0 -0
  27. llava_next/share/terminfo/v/vt320-w-nam +0 -0
  28. llava_next/share/terminfo/v/vt420+lrmm +0 -0
  29. llava_next/share/terminfo/v/vt420pc +0 -0
  30. llava_next/share/terminfo/v/vt510 +0 -0
  31. llava_next/share/terminfo/v/vt52 +0 -0
  32. llava_next/share/terminfo/v/vt61 +0 -0
  33. llava_next/share/terminfo/v/vte +0 -0
  34. llava_next/share/terminfo/v/vte-2018 +0 -0
  35. llava_next/share/terminfo/v/vte-direct +0 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/__init__.py +0 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg.py +49 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg_impl.py +1588 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator.py +1693 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_addition.py +437 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_adjoint.py +238 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_diag.py +818 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py +986 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_circulant.py +1551 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_composition.py +404 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_diag.py +388 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_full_matrix.py +207 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_householder.py +285 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_identity.py +929 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_inversion.py +231 -0
llava_next/share/terminfo/v/v200-nam ADDED
Binary file (1.71 kB). View file
 
llava_next/share/terminfo/v/vapple ADDED
Binary file (370 Bytes). View file
 
llava_next/share/terminfo/v/vc203 ADDED
Binary file (309 Bytes). View file
 
llava_next/share/terminfo/v/vc403a ADDED
Binary file (309 Bytes). View file
 
llava_next/share/terminfo/v/vi500 ADDED
Binary file (551 Bytes). View file
 
llava_next/share/terminfo/v/viewdata-rv ADDED
Binary file (880 Bytes). View file
 
llava_next/share/terminfo/v/viewpoint3a+ ADDED
Binary file (585 Bytes). View file
 
llava_next/share/terminfo/v/visa50 ADDED
Binary file (979 Bytes). View file
 
llava_next/share/terminfo/v/vitty ADDED
Binary file (470 Bytes). View file
 
llava_next/share/terminfo/v/vp60 ADDED
Binary file (520 Bytes). View file
 
llava_next/share/terminfo/v/vt100+4bsd ADDED
Binary file (1 kB). View file
 
llava_next/share/terminfo/v/vt100+keypad ADDED
Binary file (368 Bytes). View file
 
llava_next/share/terminfo/v/vt100-bm-o ADDED
Binary file (1.5 kB). View file
 
llava_next/share/terminfo/v/vt100-nam ADDED
Binary file (1.28 kB). View file
 
llava_next/share/terminfo/v/vt100-s ADDED
Binary file (1.36 kB). View file
 
llava_next/share/terminfo/v/vt100-w ADDED
Binary file (1.3 kB). View file
 
llava_next/share/terminfo/v/vt100-w-nam ADDED
Binary file (1.31 kB). View file
 
llava_next/share/terminfo/v/vt102-nsgr ADDED
Binary file (1.26 kB). View file
 
llava_next/share/terminfo/v/vt131 ADDED
Binary file (690 Bytes). View file
 
llava_next/share/terminfo/v/vt200 ADDED
Binary file (1.64 kB). View file
 
llava_next/share/terminfo/v/vt200-8bit ADDED
Binary file (1.52 kB). View file
 
llava_next/share/terminfo/v/vt200-w ADDED
Binary file (1.67 kB). View file
 
llava_next/share/terminfo/v/vt220+pcedit ADDED
Binary file (428 Bytes). View file
 
llava_next/share/terminfo/v/vt220+vtedit ADDED
Binary file (488 Bytes). View file
 
llava_next/share/terminfo/v/vt220-w ADDED
Binary file (1.67 kB). View file
 
llava_next/share/terminfo/v/vt300-w-nam ADDED
Binary file (1.82 kB). View file
 
llava_next/share/terminfo/v/vt320-w-nam ADDED
Binary file (1.82 kB). View file
 
llava_next/share/terminfo/v/vt420+lrmm ADDED
Binary file (851 Bytes). View file
 
llava_next/share/terminfo/v/vt420pc ADDED
Binary file (2.64 kB). View file
 
llava_next/share/terminfo/v/vt510 ADDED
Binary file (2.19 kB). View file
 
llava_next/share/terminfo/v/vt52 ADDED
Binary file (839 Bytes). View file
 
llava_next/share/terminfo/v/vt61 ADDED
Binary file (432 Bytes). View file
 
llava_next/share/terminfo/v/vte ADDED
Binary file (3.53 kB). View file
 
llava_next/share/terminfo/v/vte-2018 ADDED
Binary file (3.53 kB). View file
 
llava_next/share/terminfo/v/vte-direct ADDED
Binary file (3.73 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg namespace."""

# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.linalg.linalg_impl import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_adjoint import *
from tensorflow.python.ops.linalg.linear_operator_block_diag import *
from tensorflow.python.ops.linalg.linear_operator_block_lower_triangular import *
from tensorflow.python.ops.linalg.linear_operator_circulant import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_householder import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_inversion import *
from tensorflow.python.ops.linalg.linear_operator_kronecker import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
from tensorflow.python.ops.linalg.linear_operator_permutation import *
from tensorflow.python.ops.linalg.linear_operator_toeplitz import *
from tensorflow.python.ops.linalg.linear_operator_tridiag import *
from tensorflow.python.ops.linalg.linear_operator_zeros import *
# pylint: enable=wildcard-import

# Seal API.
# The wildcard imports above also pull in the implementation modules that
# linalg_impl itself imported (ops, array_ops, ...). Delete those names here
# so they are not exposed as part of the public tf.linalg namespace.
# pylint: disable=undefined-variable
del ops
del array_ops
del gen_linalg_ops
del linalg_ops
del math_ops
del special_math_ops
del tf_export
# pylint: enable=undefined-variable
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg_impl.py ADDED
@@ -0,0 +1,1588 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Operations for linear algebra."""
16
+
17
+ import numpy as np
18
+
19
+ from tensorflow.python.framework import constant_op
20
+ from tensorflow.python.framework import dtypes
21
+ from tensorflow.python.framework import ops
22
+ from tensorflow.python.framework import tensor_shape
23
+ from tensorflow.python.ops import array_ops
24
+ from tensorflow.python.ops import array_ops_stack
25
+ from tensorflow.python.ops import check_ops
26
+ from tensorflow.python.ops import cond as tf_cond
27
+ from tensorflow.python.ops import control_flow_ops
28
+ from tensorflow.python.ops import gen_linalg_ops
29
+ from tensorflow.python.ops import linalg_ops
30
+ from tensorflow.python.ops import map_fn
31
+ from tensorflow.python.ops import math_ops
32
+ from tensorflow.python.ops import special_math_ops
33
+ from tensorflow.python.ops import stateless_random_ops
34
+ from tensorflow.python.ops import while_loop
35
+ from tensorflow.python.util import dispatch
36
+ from tensorflow.python.util.tf_export import tf_export
37
+
38
# Linear algebra ops.
# Short public aliases for ops implemented in array_ops / linalg_ops /
# gen_linalg_ops / math_ops / special_math_ops. They are re-exported under
# tf.linalg via the wildcard import in linalg.py.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
# slogdet and logm come straight from the generated op wrappers, so they get
# their tf_export / dispatch registration here rather than in linalg_ops.
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(dispatch.add_dispatch_support(slogdet))
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(dispatch.add_dispatch_support(logm))
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
66
+
67
+
68
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  Uses the identity log(det(A)) = 2 * sum(log(real(diag(C)))), where C is the
  Cholesky factor of A, which avoids forming the determinant itself and so
  reduces the chance of over- or underflow:

  ```python
  A = ...  # shape 10 x 10
  det = tf.exp(tf.linalg.logdet(A))  # scalar
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op`. Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  with ops.name_scope(name, 'logdet', [matrix]):
    cholesky_factor = gen_linalg_ops.cholesky(matrix)
    diag_entries = array_ops.matrix_diag_part(cholesky_factor)
    log_diag = math_ops.log(math_ops.real(diag_entries))
    return 2.0 * math_ops.reduce_sum(log_diag, axis=[-1])
100
+
101
+
102
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
  """Returns the Hermitian transpose of tensor `matrix`.

  The last two dimensions are swapped and every element is conjugated.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    tensor = ops.convert_to_tensor(matrix, name='matrix')
    return array_ops.matrix_transpose(tensor, conjugate=True)
129
+
130
+
131
+ # This section is ported nearly verbatim from Eigen's implementation:
132
+ # https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
133
def _matrix_exp_pade3(matrix):
  """3rd-order Pade approximant for matrix exponential.

  Returns the pair (U, V) used by `matrix_exponential`, where U collects the
  odd-degree terms and V the even-degree terms of the approximant.
  """
  coeffs = [
      constant_op.constant(c, matrix.dtype) for c in (120.0, 60.0, 12.0)
  ]
  identity = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  odd_poly = m2 + coeffs[1] * identity
  matrix_u = math_ops.matmul(matrix, odd_poly)
  matrix_v = coeffs[2] * m2 + coeffs[0] * identity
  return matrix_u, matrix_v
146
+
147
+
148
def _matrix_exp_pade5(matrix):
  """5th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) used by `matrix_exponential`, where U collects the
  odd-degree terms and V the even-degree terms of the approximant.
  """
  coeffs = [
      constant_op.constant(c, matrix.dtype)
      for c in (30240.0, 15120.0, 3360.0, 420.0, 30.0)
  ]
  identity = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  odd_poly = m4 + coeffs[3] * m2 + coeffs[1] * identity
  matrix_u = math_ops.matmul(matrix, odd_poly)
  matrix_v = coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * identity
  return matrix_u, matrix_v
162
+
163
+
164
def _matrix_exp_pade7(matrix):
  """7th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) used by `matrix_exponential`, where U collects the
  odd-degree terms and V the even-degree terms of the approximant.
  """
  coeffs = [
      constant_op.constant(c, matrix.dtype)
      for c in (17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
                56.0)
  ]
  identity = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  odd_poly = m6 + coeffs[5] * m4 + coeffs[3] * m2 + coeffs[1] * identity
  matrix_u = math_ops.matmul(matrix, odd_poly)
  matrix_v = coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * identity
  return matrix_u, matrix_v
179
+
180
+
181
def _matrix_exp_pade9(matrix):
  """9th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) used by `matrix_exponential`; the approximant is
  then evaluated as solve(V - U, V + U).
  """
  # b[k] is the k-th coefficient of the degree-9 Pade approximant.
  b = [
      17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
      2162160.0, 110880.0, 3960.0, 90.0
  ]
  b = [constant_op.constant(x, matrix.dtype) for x in b]
  # Batched identity matching the trailing [M, M] dims of `matrix`.
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  # Even powers of the input; each costs a single extra matmul.
  matrix_2 = math_ops.matmul(matrix, matrix)
  matrix_4 = math_ops.matmul(matrix_2, matrix_2)
  matrix_6 = math_ops.matmul(matrix_4, matrix_2)
  matrix_8 = math_ops.matmul(matrix_6, matrix_2)
  # U gathers the odd-degree terms: matrix times an even polynomial.
  tmp = (
      matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
      b[1] * ident)
  matrix_u = math_ops.matmul(matrix, tmp)
  # V gathers the even-degree terms.
  matrix_v = (
      b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
      b[0] * ident)
  return matrix_u, matrix_v
204
+
205
+
206
def _matrix_exp_pade13(matrix):
  """13th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) used by `matrix_exponential`; the approximant is
  then evaluated as solve(V - U, V + U).
  """
  # b[k] is the k-th coefficient of the degree-13 Pade approximant.
  b = [
      64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
      1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
      33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
  ]
  b = [constant_op.constant(x, matrix.dtype) for x in b]
  # Batched identity matching the trailing [M, M] dims of `matrix`.
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  # Only powers up to A^6 are formed; the degree-13 terms are obtained by
  # multiplying an inner even polynomial by A^6 (Paterson-Stockmeyer style).
  matrix_2 = math_ops.matmul(matrix, matrix)
  matrix_4 = math_ops.matmul(matrix_2, matrix_2)
  matrix_6 = math_ops.matmul(matrix_4, matrix_2)
  # U gathers the odd-degree terms: matrix times an even polynomial.
  tmp_u = (
      math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
      b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
  matrix_u = math_ops.matmul(matrix, tmp_u)
  # V gathers the even-degree terms.
  tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
  matrix_v = (
      math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
      b[2] * matrix_2 + b[0] * ident)
  return matrix_u, matrix_v
230
+
231
+
232
@tf_export('linalg.expm')
@dispatch.add_dispatch_support
def matrix_exponential(input, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the matrix exponential of one or more square matrices.

  $$exp(A) = \sum_{n=0}^\infty A^n/n!$$

  The exponential is computed using a combination of the scaling and squaring
  method and the Pade approximation. Details can be found in:
  Nicholas J. Higham, "The scaling and squaring method for the matrix
  exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the exponential for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
      `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    the matrix exponential of the input.

  Raises:
    ValueError: An unsupported type is provided as input.

  @compatibility(scipy)
  Equivalent to scipy.linalg.expm
  @end_compatibility
  """
  with ops.name_scope(name, 'matrix_exponential', [input]):
    matrix = ops.convert_to_tensor(input, name='input')
    # Empty matrices: exp of a 0x0 matrix is itself.
    if matrix.shape[-2:] == [0, 0]:
      return matrix
    # Remember the batch shape (static if known, dynamic otherwise) so the
    # result can be reshaped back after the batch dims are flattened below.
    batch_shape = matrix.shape[:-2]
    if not batch_shape.is_fully_defined():
      batch_shape = array_ops.shape(matrix)[:-2]

    # reshaping the batch makes the where statements work better
    matrix = array_ops.reshape(
        matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
    # Per-matrix L1 norm (max absolute column sum), kept with two trailing
    # singleton dims so it broadcasts against the [batch, M, M] matrices.
    l1_norm = math_ops.reduce_max(
        math_ops.reduce_sum(
            math_ops.abs(matrix),
            axis=array_ops.size(array_ops.shape(matrix)) - 2),
        axis=-1)[..., array_ops.newaxis, array_ops.newaxis]

    const = lambda x: constant_op.constant(x, l1_norm.dtype)

    def _nest_where(vals, cases):
      # Selects, per batch element, the lowest-order Pade result whose norm
      # threshold in `vals` exceeds that element's l1_norm.
      assert len(vals) == len(cases) - 1
      if len(vals) == 1:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
      else:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0],
            _nest_where(vals[1:], cases[1:]))

    # Pick the Pade orders and scaling threshold (maxnorm) per dtype; the
    # magic constants are the theta values from Higham's 2005 paper.
    if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
      maxnorm = const(3.925724783138660)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      # Only the highest-order approximant sees the scaled-down matrix.
      u7, v7 = _matrix_exp_pade7(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (4.258730016922831e-001, 1.880152677804762e+000)
      u = _nest_where(conds, (u3, u5, u7))
      v = _nest_where(conds, (v3, v5, v7))
    elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
      maxnorm = const(5.371920351148152)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(matrix)
      u9, v9 = _matrix_exp_pade9(matrix)
      u13, v13 = _matrix_exp_pade13(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (1.495585217958292e-002, 2.539398330063230e-001,
               9.504178996162932e-001, 2.097847961257068e+000)
      u = _nest_where(conds, (u3, u5, u7, u9, u13))
      v = _nest_where(conds, (v3, v5, v7, v9, v13))
    else:
      raise ValueError('tf.linalg.expm does not support matrices of type %s' %
                       matrix.dtype)

    # Evaluate the Pade approximant r(A) = solve(V - U, V + U); if any input
    # norm is non-finite, fill with NaN instead of calling the solver.
    is_finite = math_ops.is_finite(math_ops.reduce_max(l1_norm))
    nan = constant_op.constant(np.nan, matrix.dtype)
    result = tf_cond.cond(
        is_finite, lambda: linalg_ops.matrix_solve(-u + v, u + v),
        lambda: array_ops.fill(array_ops.shape(matrix), nan))
    max_squarings = math_ops.reduce_max(squarings)
    i = const(0.0)

    def c(i, _):
      # Loop while more squarings remain; skip entirely on non-finite input.
      return tf_cond.cond(is_finite,
                          lambda: math_ops.less(i, max_squarings),
                          lambda: constant_op.constant(False))

    def b(i, r):
      # Undo the scaling: square each element's result as many times as it
      # was halved (elements needing fewer squarings pass through unchanged).
      return i + 1, array_ops.where_v2(
          math_ops.less(i, squarings), math_ops.matmul(r, r), r)

    _, result = while_loop.while_loop(c, b, [i, result])
    # Restore the original batch shape (dynamic path if not fully known).
    if not matrix.shape.is_fully_defined():
      return array_ops.reshape(
          result,
          array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
    return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
348
+
349
+
350
@tf_export('linalg.banded_triangular_solve', v1=[])
def banded_triangular_solve(
    bands,
    rhs,
    lower=True,
    adjoint=False,  # pylint: disable=redefined-outer-name
    name=None):
  r"""Solve triangular systems of equations with a banded solver.

  `bands` is a tensor of shape `[..., K, M]`, where `K` represents the number
  of bands stored. This corresponds to a batch of `M` by `M` matrices, whose
  `K` subdiagonals (when `lower` is `True`) are stored.

  This operator broadcasts the batch dimensions of `bands` and the batch
  dimensions of `rhs`.


  Examples:

  Storing 2 bands of a 3x3 matrix.
  Note that first element in the second row is ignored due to
  the 'LEFT_RIGHT' padding.

  >>> x = [[2., 3., 4.], [1., 2., 3.]]
  >>> x2 = [[2., 3., 4.], [10000., 2., 3.]]
  >>> y = tf.zeros([3, 3])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0))
  >>> z
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[2., 0., 0.],
         [2., 3., 0.],
         [0., 3., 4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1]))
  >>> soln
  <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
  array([[0.5 ],
         [0.  ],
         [0.25]], dtype=float32)>
  >>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True
  >>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True

  Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding
  the last element of the first row is ignored.

  >>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]]
  >>> y = tf.zeros([4, 4])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1))
  >>> z
  <tf.Tensor: shape=(4, 4), dtype=float32, numpy=
  array([[-1.,  2.,  0.,  0.],
         [ 0., -2.,  3.,  0.],
         [ 0.,  0., -3.,  4.],
         [ 0.,  0., -0., -4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False)
  >>> soln
  <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  array([[-4.       ],
         [-1.5      ],
         [-0.6666667],
         [-0.25     ]], dtype=float32)>
  >>> are_equal = (soln == tf.linalg.triangular_solve(
  ...   z, tf.ones([4, 1]), lower=False))
  >>> tf.reduce_all(are_equal).numpy()
  True


  Args:
    bands: A `Tensor` describing the bands of the left hand side, with shape
      `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th
      diagonal (the diagonal is the top row) when `lower` is `True` and
      otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is
      the bottom row) when `lower` is `False`. The bands are stored with
      'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right
      and subdiagonals are padded on the left. This is the alignment cuSPARSE
      uses. See `tf.linalg.set_diag` for more details.
    rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    lower: An optional `bool`. Defaults to `True`. Boolean indicating whether
      `bands` represents a lower or upper triangular matrix.
    adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether
      to solve with the matrix's block-wise adjoint.
    name: A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M] or [..., M, N] containing the solutions.
  """
  # Thin wrapper: all validation and computation happen in the generated
  # BandedTriangularSolve op; this just adds the name scope.
  with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]):
    return gen_linalg_ops.banded_triangular_solve(
        bands, rhs, lower=lower, adjoint=adjoint)
444
+
445
+
446
@tf_export('linalg.tridiagonal_solve')
@dispatch.add_dispatch_support
def tridiagonal_solve(diagonals,
                      rhs,
                      diagonals_format='compact',
                      transpose_rhs=False,
                      conjugate_rhs=False,
                      name=None,
                      partial_pivoting=True,
                      perturb_singular=False):
  r"""Solves tridiagonal systems of equations.

  The input can be supplied in various formats: `matrix`, `sequence` and
  `compact`, specified by the `diagonals_format` arg.

  In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
  two inner-most dimensions representing the square tridiagonal matrices.
  Elements outside of the three diagonals will be ignored.

  In `sequence` format, `diagonals` are supplied as a tuple or list of three
  tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
  superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
  `M-1` or `M`; in the latter case, the last element of superdiagonal and the
  first element of subdiagonal will be ignored.

  In `compact` format the three diagonals are brought together into one tensor
  of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
  diagonals, and subdiagonals, in order. Similarly to `sequence` format,
  elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `compact` format is recommended as the one with best performance. In case
  you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
  An example for a tensor of shape [m, m]:

  ```python
  rhs = tf.constant([...])
  matrix = tf.constant([[...]])
  m = matrix.shape[0]
  dummy_idx = [0, 0]  # An arbitrary element to use as a dummy
  indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx],  # Superdiagonal
             [[i, i] for i in range(m)],  # Diagonal
             [dummy_idx] + [[i + 1, i] for i in range(m - 1)]]  # Subdiagonal
  diagonals=tf.gather_nd(matrix, indices)
  x = tf.linalg.tridiagonal_solve(diagonals, rhs)
  ```

  Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
  `[..., M, K]`. The latter allows to simultaneously solve K systems with the
  same left-hand sides and K different right-hand sides. If `transpose_rhs`
  is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.

  The batch dimensions, denoted as `...`, must be the same in `diagonals` and
  `rhs`.

  The output is a tensor of the same shape as `rhs`: either `[..., M]` or
  `[..., M, K]`.

  The op isn't guaranteed to raise an error if the input matrix is not
  invertible. `tf.debugging.check_numerics` can be applied to the output to
  detect invertibility problems.

  **Note**: with large batch sizes, the computation on the GPU may be slow, if
  either `partial_pivoting=True` or there are multiple right-hand sides
  (`K > 1`). If this issue arises, consider if it's possible to disable pivoting
  and have `K = 1`, or, alternatively, consider using CPU.

  On CPU, solution is computed via Gaussian elimination with or without partial
  pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
      `compact`.
    transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
      if the shape of rhs is [..., M]).
    conjugate_rhs: If `True`, `rhs` is conjugated before solving.
    name: A name to give this `Op` (optional).
    partial_pivoting: whether to perform partial pivoting. `True` by default.
      Partial pivoting makes the procedure more stable, but slower. Partial
      pivoting is unnecessary in some cases, including diagonally dominant and
      symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
    perturb_singular: whether to perturb singular matrices to return a finite
      result. `False` by default. If true, solutions to systems involving
      a singular matrix will be computed by perturbing near-zero pivots in
      the partially pivoted LU decomposition. Specifically, tiny pivots are
      perturbed by an amount of order `eps * max_{ij} |U(i,j)|` to avoid
      overflow. Here `U` is the upper triangular part of the LU decomposition,
      and `eps` is the machine precision. This is useful for solving
      numerically singular systems when computing eigenvectors by inverse
      iteration.
      If `partial_pivoting` is `False`, `perturb_singular` must be `False` as
      well.

  Returns:
    A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
    If the input matrix is singular, the result is undefined.

  Raises:
    ValueError: Is raised if any of the following conditions hold:
      1. An unsupported type is provided as input,
      2. the input tensors have incorrect shapes,
      3. `perturb_singular` is `True` but `partial_pivoting` is not.
    UnimplementedError: Whenever `partial_pivoting` is true and the backend is
      XLA, or whenever `perturb_singular` is true and the backend is
      XLA or GPU.

  [1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
    Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.

  """
  if perturb_singular and not partial_pivoting:
    raise ValueError('partial_pivoting must be True if perturb_singular is.')

  if diagonals_format == 'compact':
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'sequence':
    if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
      raise ValueError('Expected diagonals to be a sequence of length 3.')

    superdiag, maindiag, subdiag = diagonals
    if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
        not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
      # BUG FIX: the two string fragments previously joined without a space,
      # producing "...same shape,except for...".
      raise ValueError(
          'Tensors representing the three diagonals must have the same shape, '
          'except for the last dimension, got {}, {}, {}'.format(
              subdiag.shape, maindiag.shape, superdiag.shape))

    m = tensor_shape.dimension_value(maindiag.shape[-1])

    def pad_if_necessary(t, name, last_dim_padding):
      # Diagonals of length m - 1 are zero-padded to length m so all three
      # can be stacked into the compact [..., 3, M] layout.
      n = tensor_shape.dimension_value(t.shape[-1])
      if not n or n == m:
        return t
      if n == m - 1:
        paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
                    [last_dim_padding])
        return array_ops.pad(t, paddings)
      # BUG FIX: grammar of the error message ("to be have" -> "to have").
      raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
          name, m, m - 1, n))

    subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
    superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])

    diagonals = array_ops_stack.stack((superdiag, maindiag, subdiag), axis=-2)
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'matrix':
    m1 = tensor_shape.dimension_value(diagonals.shape[-1])
    m2 = tensor_shape.dimension_value(diagonals.shape[-2])
    if m1 and m2 and m1 != m2:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(m1, m2))
    # matrix_diag_part extracts the three central diagonals directly into the
    # compact [..., 3, M] layout. (An unused local `m = m1 or m2` was removed.)
    diagonals = array_ops.matrix_diag_part(
        diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
618
+
619
+
620
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                      conjugate_rhs, partial_pivoting,
                                      perturb_singular, name):
  """Helper function used after the input has been cast to compact form.

  Args:
    diagonals: `Tensor` of shape [..., 3, M] holding the superdiagonal,
      diagonal, and subdiagonal rows (compact format).
    rhs: `Tensor` of shape [..., M] (vector) or [..., M, K] (matrix) of
      right-hand sides.
    transpose_rhs: Python `bool`; transpose `rhs` before solving. Ignored
      when `rhs` is statically known to be a vector.
    conjugate_rhs: Python `bool`; conjugate `rhs` before solving.
    partial_pivoting: Python `bool`; forwarded to the kernel.
    perturb_singular: Python `bool`; forwarded to the kernel.
    name: Optional op name.

  Returns:
    A `Tensor` shaped like `rhs` containing the solutions.

  Raises:
    ValueError: if statically-known ranks or shapes are inconsistent.
  """
  diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank

  # If we know the rank of the diagonal tensor, do some static checking.
  if diags_rank:
    if diags_rank < 2:
      raise ValueError(
          'Expected diagonals to have rank at least 2, got {}'.format(
              diags_rank))
    # rhs may be a vector (rank diags_rank - 1) or a matrix (rank diags_rank).
    if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
      raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
          diags_rank - 1, diags_rank, rhs_rank))
    if (rhs_rank and not diagonals.shape[:-2].is_compatible_with(
        rhs.shape[:diags_rank - 2])):
      raise ValueError('Batch shapes {} and {} are incompatible'.format(
          diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))

  if diagonals.shape[-2] and diagonals.shape[-2] != 3:
    raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))

  def check_num_lhs_matches_num_rhs():
    # Static check that the system size M matches the number of rhs rows.
    if (diagonals.shape[-1] and rhs.shape[-2] and
        diagonals.shape[-1] != rhs.shape[-2]):
      raise ValueError('Expected number of left-hand sided and right-hand '
                       'sides to be equal, got {} and {}'.format(
                           diagonals.shape[-1], rhs.shape[-2]))

  if rhs_rank and diags_rank and rhs_rank == diags_rank - 1:
    # Rhs provided as a vector, ignoring transpose_rhs
    if conjugate_rhs:
      rhs = math_ops.conj(rhs)
    # The kernel expects a matrix rhs; add a unit column and strip it after.
    rhs = array_ops.expand_dims(rhs, -1)
    check_num_lhs_matches_num_rhs()
    return array_ops.squeeze(
        linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                     perturb_singular, name), -1)

  if transpose_rhs:
    # matrix_transpose fuses conjugation into the transpose when requested.
    rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
  elif conjugate_rhs:
    rhs = math_ops.conj(rhs)

  check_num_lhs_matches_num_rhs()
  return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                      perturb_singular, name)
668
+
669
+
670
@tf_export('linalg.tridiagonal_matmul')
@dispatch.add_dispatch_support
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
  r"""Multiplies tridiagonal matrix by matrix.

  `diagonals` encodes the 3-diagonal `M x M` left-hand matrix in one of three
  layouts selected by `diagonals_format`:

  * `'matrix'`: a tensor of shape `[..., M, M]`; entries outside the three
    central diagonals are ignored.
  * `'sequence'`: a list or tuple `[superdiag, maindiag, subdiag]` of tensors
    of shape `[..., M]`; the last element of `superdiag` and the first element
    of `subdiag` are ignored.
  * `'compact'`: a single tensor of shape `[..., 3, M]` whose last two
    dimensions hold superdiagonals, diagonals, and subdiagonals, in order;
    elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `'sequence'` format offers the best performance.

  `rhs` is the matrix on the right of the product; its shape is `[..., M, N]`.

  Example:

  ```python
  superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
  maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
  subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
  diagonals = [superdiag, maindiag, subdiag]
  rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
  x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
  ```

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides.
      The shape depends on `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M, N] and with the same dtype as
      `diagonals`.
    diagonals_format: one of `sequence`, or `compact`. Default is `compact`.
    name: A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M, N] containing the result of multiplication.

  Raises:
    ValueError: An unsupported type is provided as input, or when the input
      tensors have incorrect shapes.
  """
  if diagonals_format == 'compact':
    superdiag, maindiag, subdiag = (
        diagonals[..., 0, :], diagonals[..., 1, :], diagonals[..., 2, :])
  elif diagonals_format == 'sequence':
    superdiag, maindiag, subdiag = diagonals
  elif diagonals_format == 'matrix':
    num_cols = tensor_shape.dimension_value(diagonals.shape[-1])
    num_rows = tensor_shape.dimension_value(diagonals.shape[-2])
    if num_cols and num_rows and num_cols != num_rows:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(num_cols, num_rows))
    extracted = array_ops.matrix_diag_part(
        diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    superdiag, maindiag, subdiag = (
        extracted[..., 0, :], extracted[..., 1, :], extracted[..., 2, :])
  else:
    raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)

  # The C++ kernel expects matrices; lift each diagonal vector to a
  # single-row matrix.
  superdiag = array_ops.expand_dims(superdiag, -2)
  maindiag = array_ops.expand_dims(maindiag, -2)
  subdiag = array_ops.expand_dims(subdiag, -2)

  return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
749
+
750
+
751
def _maybe_validate_matrix(a, validate_args):
  """Statically and (optionally) dynamically checks that `a` is a float matrix.

  Raises eagerly when the dtype or a statically-known rank is wrong;
  otherwise returns a (possibly empty) list of runtime assertion ops.
  """
  if not a.dtype.is_floating:
    raise TypeError('Input `a` must have `float`-like `dtype` '
                    '(saw {}).'.format(a.dtype.name))

  runtime_checks = []
  static_rank = a.shape.rank if a.shape is not None else None
  if static_rank is not None:
    if static_rank < 2:
      raise ValueError('Input `a` must have at least 2 dimensions '
                       '(saw: {}).'.format(static_rank))
  elif validate_args:
    # Rank unknown at graph-construction time; defer the check to runtime.
    runtime_checks.append(
        check_ops.assert_rank_at_least(
            a, rank=2, message='Input `a` must have at least 2 dimensions.'))
  return runtime_checks
766
+
767
+
768
@tf_export('linalg.matrix_rank')
@dispatch.add_dispatch_support
def matrix_rank(a, tol=None, validate_args=False, name=None):
  """Compute the matrix rank of one or more matrices.

  The rank is the number of singular values strictly greater than `tol`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) whose rank is to be
      computed.
    tol: Threshold below which the singular value is counted as 'zero'.
      Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'matrix_rank'.

  Returns:
    matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
      singular values.
  """
  with ops.name_scope(name or 'matrix_rank'):
    a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
    checks = _maybe_validate_matrix(a, validate_args)
    if checks:
      with ops.control_dependencies(checks):
        a = array_ops.identity(a)
    singular_values = svd(a, compute_uv=False)
    if tol is None:
      # Mirror numpy's default: eps * max(rows, cols) * largest singular
      # value, preferring static dimensions when available.
      if a.shape[-2:].is_fully_defined():
        largest_dim = np.max(a.shape[-2:].as_list())
      else:
        largest_dim = math_ops.reduce_max(array_ops.shape(a)[-2:])
      eps = np.finfo(a.dtype.as_numpy_dtype).eps
      tol = (eps * math_ops.cast(largest_dim, a.dtype) *
             math_ops.reduce_max(singular_values, axis=-1, keepdims=True))
    return math_ops.reduce_sum(
        math_ops.cast(singular_values > tol, dtypes.int32), axis=-1)
805
+
806
+
807
@tf_export('linalg.pinv')
@dispatch.add_dispatch_support
def pinv(a, rcond=None, validate_args=False, name=None):
  """Compute the Moore-Penrose pseudo-inverse of one or more matrices.

  The pseudo-inverse is computed from the singular value decomposition
  `a = u @ s @ v^H` as `pinv(a) = v @ inv(s) @ u^H`, where singular values
  below the cutoff `rcond * max(s)` are treated as zero. If `x_hat` solves
  the least-squares problem `a @ x = b`, then `x_hat = pinv(a) @ b`.
  [(Strang, 1980)][1]

  This function is analogous to [`numpy.linalg.pinv`](
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
  It differs only in the default value of `rcond`: numpy uses `1e-15`, while
  here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    rcond: `Tensor` of small singular value cutoffs. Singular values smaller
      (in modulus) than `rcond` * largest_singular_value (again, in modulus)
      are set to zero. Must broadcast against `tf.shape(a)[:-2]`.
      Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'pinv'.

  Returns:
    a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a`
      except rightmost two dimensions are transposed.

  Raises:
    TypeError: if input `a` does not have `float`-like `dtype`.
    ValueError: if input `a` has fewer than 2 dimensions.

  #### References

  [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic
    Press, Inc., 1980, pp. 139-142.
  """
  with ops.name_scope(name or 'pinv'):
    a = ops.convert_to_tensor(a, name='a')

    checks = _maybe_validate_matrix(a, validate_args)
    if checks:
      with ops.control_dependencies(checks):
        a = array_ops.identity(a)

    dtype = a.dtype.as_numpy_dtype

    if rcond is None:

      def _dim(axis):
        # Prefer the static dimension; fall back to the dynamic shape.
        static = tensor_shape.dimension_value(a.shape[axis])
        return static if static is not None else array_ops.shape(a)[axis]

      rows, cols = _dim(-2), _dim(-1)
      if isinstance(rows, int) and isinstance(cols, int):
        larger_dim = float(max(rows, cols))
      else:
        larger_dim = math_ops.cast(math_ops.maximum(rows, cols), dtype)
      rcond = 10. * larger_dim * np.finfo(dtype).eps

    rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')

    # SVD: a == u @ diag(s) @ v^H. (If a is Hermitian then u == v; one could
    # exploit that for extra performance.)
    s, u, v = svd(a, full_matrices=False, compute_uv=True)

    # Send singular values below the cutoff to +inf, so that 1/s becomes 0
    # for them without producing NaN gradients.
    cutoff = rcond * math_ops.reduce_max(s, axis=-1)
    s = array_ops.where_v2(
        s > array_ops.expand_dims_v2(cutoff, -1), s,
        np.array(np.inf, dtype))

    # pinv(a) = v @ inv(diag(s)) @ u^H, folding inv(diag(s)) into v via a
    # broadcasting division.
    a_pinv = math_ops.matmul(
        v / array_ops.expand_dims_v2(s, -2), u, adjoint_b=True)

    if a.shape is not None and a.shape.rank is not None:
      a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))

    return a_pinv
935
+
936
+
937
@tf_export('linalg.lu_solve')
@dispatch.add_dispatch_support
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
      tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_solve').

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """

  with ops.name_scope(name or 'lu_solve'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
    rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')

    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)
        rhs = array_ops.identity(rhs)

    if (rhs.shape.rank == 2 and perm.shape.rank == 1):
      # Both rhs and perm have scalar batch_shape.
      permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      rhs_shape = array_ops.shape(rhs)
      broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
          rhs_shape[:-2],
          array_ops.shape(perm)[:-1])
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
                                             axis=0)

      # Tile out rhs.
      broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices.
      broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
      broadcast_batch_indices = array_ops.broadcast_to(
          math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
          [broadcast_batch_size, d])
      broadcast_perm = array_ops_stack.stack(
          [broadcast_batch_indices, broadcast_perm], axis=-1)

      # gather_nd with (batch, row) index pairs applies each batch's
      # permutation to the rows of its corresponding rhs matrix.
      permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)

    # L is the strictly-lower part of lower_upper with an implicit unit
    # diagonal (the packed LU representation stores only U's diagonal).
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(
            array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
    # Forward-substitute L y = P rhs, then back-substitute U x = y.
    return triangular_solve(
        lower_upper,  # Only upper is accessed.
        triangular_solve(lower, permuted_rhs),
        lower=False)
1033
+
1034
+
1035
@tf_export('linalg.lu_matrix_inverse')
@dispatch.add_dispatch_support
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
  """Computes the inverse given the LU decomposition(s) of one or more matrices.

  Conceptually equivalent to

  ```python
  inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X))
  tf.assert_near(tf.matrix_inverse(X), inv_X)
  # ==> True
  ```

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Invertibility itself is never checked.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_matrix_inverse').

  Returns:
    inv_x: The matrix_inv, i.e.,
      `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """
  with ops.name_scope(name or 'lu_matrix_inverse'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
    checks = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if checks:
      with ops.control_dependencies(checks):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)
    lu_shape = array_ops.shape(lower_upper)
    # Solving A X = I against the factorization yields X = A^{-1} directly.
    identity_rhs = eye(
        lu_shape[-1], batch_shape=lu_shape[:-2], dtype=lower_upper.dtype)
    return lu_solve(lower_upper, perm, rhs=identity_rhs, validate_args=False)
1098
+
1099
+
1100
@tf_export('linalg.lu_reconstruct')
@dispatch.add_dispatch_support
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
  """The reconstruct one or more matrices from their LU decomposition(s).

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_reconstruct').

  Returns:
    x: The original input to `tf.linalg.lu`, i.e., `x` as in,
      `lu_reconstruct(*tf.linalg.lu(x))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
  tf.assert_near(x, x_reconstructed)
  # ==> True
  ```

  """
  with ops.name_scope(name or 'lu_reconstruct'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')

    assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)

    shape = array_ops.shape(lower_upper)

    # Unpack the factorization: L carries an implicit unit diagonal (the
    # packed representation stores only U's diagonal).
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
    upper = band_part(lower_upper, num_lower=0, num_upper=-1)
    x = math_ops.matmul(lower, upper)

    if (lower_upper.shape is None or lower_upper.shape.rank is None or
        lower_upper.shape.rank != 2):
      # We either don't know the batch rank or there are >0 batch dims.
      batch_size = math_ops.reduce_prod(shape[:-2])
      d = shape[-1]
      # Flatten batch dims so each matrix can be row-permuted via gather_nd.
      x = array_ops.reshape(x, [batch_size, d, d])
      perm = array_ops.reshape(perm, [batch_size, d])
      # invert_permutation undoes the row pivoting recorded in perm.
      perm = map_fn.map_fn(array_ops.invert_permutation, perm)
      batch_indices = array_ops.broadcast_to(
          math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
      x = array_ops.gather_nd(
          x, array_ops_stack.stack([batch_indices, perm], axis=-1))
      x = array_ops.reshape(x, shape)
    else:
      x = array_ops.gather(x, array_ops.invert_permutation(perm))

    x.set_shape(lower_upper.shape)
    return x
1172
+
1173
+
1174
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
  """Returns list of assertions related to `lu_reconstruct` assumptions.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    validate_args: Python `bool`; whether to emit runtime assertion ops for
      conditions that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops to run before consuming
    `lower_upper` and `perm`.

  Raises:
    ValueError: if a violated assumption is detectable statically.
  """
  assertions = []

  message = 'Input `lower_upper` must have at least 2 dimensions.'
  if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
    raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))

  message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
  if lower_upper.shape.rank is not None and perm.shape.rank is not None:
    if lower_upper.shape.rank != perm.shape.rank + 1:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank(
            lower_upper, rank=array_ops.rank(perm) + 1, message=message))

  message = '`lower_upper` must be square.'
  # BUG FIX: the squareness check concerns the two innermost dimensions, so
  # the static branch must test `shape[-2:]`. Previously it tested
  # `shape[:-2]` (the batch dims), so with known batch dims but unknown
  # matrix dims both the static and the runtime check were silently skipped.
  if lower_upper.shape[-2:].is_fully_defined():
    if lower_upper.shape[-2] != lower_upper.shape[-1]:
      raise ValueError(message)
  elif validate_args:
    m, n = array_ops.split(
        array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
    assertions.append(check_ops.assert_equal(m, n, message=message))

  return assertions
1204
+
1205
+
1206
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
  """Returns list of assertions related to `lu_solve` assumptions.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    rhs: Matrix-shaped `Tensor` of right-hand sides.
    validate_args: Python `bool`; whether to emit runtime assertion ops for
      conditions that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops, including those from
    `lu_reconstruct_assertions`.

  Raises:
    ValueError: if a violated assumption is detectable statically.
  """
  assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)

  message = 'Input `rhs` must have at least 2 dimensions.'
  if rhs.shape.ndims is not None:
    if rhs.shape.ndims < 2:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least(rhs, rank=2, message=message))

  # BUG FIX: the message previously said `rhs.shape[-1]`, but the condition
  # (correctly) compares `lower_upper.shape[-1]` against `rhs.shape[-2]` —
  # the system size must match the number of rhs rows.
  message = '`lower_upper.shape[-1]` must equal `rhs.shape[-2]`.'
  if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
    if lower_upper.shape[-1] != rhs.shape[-2]:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_equal(
            array_ops.shape(lower_upper)[-1],
            array_ops.shape(rhs)[-2],
            message=message))

  return assertions
1230
+
1231
+
1232
@tf_export('linalg.eigh_tridiagonal')
@dispatch.add_dispatch_support
def eigh_tridiagonal(alpha,
                     beta,
                     eigvals_only=True,
                     select='a',
                     select_range=None,
                     tol=None,
                     name=None):
  """Computes the eigenvalues of a Hermitian tridiagonal matrix.

  This op implements a subset of the functionality of
  `scipy.linalg.eigh_tridiagonal`. Eigenvalues are found by bisection on
  Sturm sequence counts; eigenvectors (if requested) are found by inverse
  iteration with reorthogonalization of clustered eigenvalues.

  Args:
    alpha: A real or complex tensor of shape (n), the diagonal elements of the
      matrix. NOTE: If alpha is complex, the imaginary part is ignored (assumed
      zero) to satisfy the requirement that the matrix be Hermitian.
    beta: A real or complex tensor of shape (n-1), containing the elements of
      the first super-diagonal of the matrix. If beta is complex, the first
      sub-diagonal of the matrix is assumed to be the conjugate of beta to
      satisfy the requirement that the matrix be Hermitian.
    eigvals_only: If False, both eigenvalues and corresponding eigenvectors are
      computed. If True, only eigenvalues are computed. Default is True.
    select: Optional string with values in {'a', 'v', 'i'} (default is 'a')
      that determines which eigenvalues to calculate:
        'a': all eigenvalues.
        'v': eigenvalues in the interval (min, max] given by `select_range`.
        'i': eigenvalues with indices min <= i <= max.
    select_range: Size 2 tuple or list or tensor specifying the range of
      eigenvalues to compute together with select. If select is 'a',
      select_range is ignored.
    tol: Optional scalar. The absolute tolerance to which each eigenvalue is
      required. An eigenvalue (or cluster) is considered to have converged if
      it lies in an interval of this width. If tol is None (default), the value
      eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the
      2-norm of the matrix T.
    name: Optional name of the op.

  Returns:
    eig_vals: The eigenvalues of the matrix in non-decreasing order.
    eig_vectors: If `eigvals_only` is False the eigenvectors are returned in
      the second output argument.

  Raises:
    ValueError: If input values are invalid.

  Note: The result is undefined if the input contains +/-inf or NaN, or if
  any value in beta has a magnitude greater than
  `numpy.sqrt(numpy.finfo(beta.dtype.as_numpy_dtype).max)`.

  TODO(b/187527398):
  Add support for outer batch dimensions.

  #### Examples

  ```python
  import numpy
  eigvals = tf.linalg.eigh_tridiagonal([0.0, 0.0, 0.0], [1.0, 1.0])
  eigvals_expected = [-numpy.sqrt(2.0), 0.0, numpy.sqrt(2.0)]
  tf.assert_near(eigvals_expected, eigvals)
  # ==> True
  ```
  """
  with ops.name_scope(name or 'eigh_tridiagonal'):

    def _compute_eigenvalues(alpha, beta):
      """Computes all eigenvalues of a Hermitian tridiagonal matrix."""

      def _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, x):
        """Implements the Sturm sequence recurrence.

        Returns, for each shift in `x`, the number of eigenvalues of
        `T - x*I` that are < 0 (i.e. the count of eigenvalues of T below x).
        """
        with ops.name_scope('sturm'):
          n = alpha.shape[0]
          zeros = array_ops.zeros(array_ops.shape(x), dtype=dtypes.int32)
          ones = array_ops.ones(array_ops.shape(x), dtype=dtypes.int32)

          # The first step in the Sturm sequence recurrence
          # requires special care if x is equal to alpha[0].
          def sturm_step0():
            q = alpha[0] - x
            count = array_ops.where(q < 0, ones, zeros)
            # Avoid a zero pivot by substituting a tiny perturbation.
            q = array_ops.where(
                math_ops.equal(alpha[0], x), alpha0_perturbation, q)
            return q, count

          # Subsequent steps all take this form:
          def sturm_step(i, q, count):
            q = alpha[i] - beta_sq[i - 1] / q - x
            count = array_ops.where(q <= pivmin, count + 1, count)
            # Clamp tiny/negative pivots away from zero to keep the
            # recurrence numerically stable.
            q = array_ops.where(q <= pivmin, math_ops.minimum(q, -pivmin), q)
            return q, count

          # The first step initializes q and count.
          q, count = sturm_step0()

          # Peel off ((n-1) % blocksize) steps from the main loop, so we can
          # run the bulk of the iterations unrolled by a factor of blocksize.
          blocksize = 16
          i = 1
          peel = (n - 1) % blocksize
          unroll_cnt = peel

          def unrolled_steps(start, q, count):
            # `unroll_cnt` is captured from the enclosing scope; it is `peel`
            # for the first call and `blocksize` inside the while loop.
            for j in range(unroll_cnt):
              q, count = sturm_step(start + j, q, count)
            return start + unroll_cnt, q, count

          i, q, count = unrolled_steps(i, q, count)

          # Run the remaining steps of the Sturm sequence using a partially
          # unrolled while loop.
          unroll_cnt = blocksize
          cond = lambda i, q, count: math_ops.less(i, n)
          _, _, count = while_loop.while_loop(
              cond, unrolled_steps, [i, q, count], back_prop=False)
          return count

      with ops.name_scope('compute_eigenvalues'):
        if alpha.dtype.is_complex:
          # The matrix is Hermitian, so the diagonal is real and only
          # |beta|^2 enters the Sturm recurrence.
          alpha = math_ops.real(alpha)
          beta_sq = math_ops.real(math_ops.conj(beta) * beta)
          beta_abs = math_ops.sqrt(beta_sq)
        else:
          beta_sq = math_ops.square(beta)
          beta_abs = math_ops.abs(beta)

        # Estimate the largest and smallest eigenvalues of T using the
        # Gershgorin circle theorem.
        finfo = np.finfo(alpha.dtype.as_numpy_dtype)
        off_diag_abs_row_sum = array_ops.concat(
            [beta_abs[:1], beta_abs[:-1] + beta_abs[1:], beta_abs[-1:]], axis=0)
        lambda_est_max = math_ops.minimum(
            finfo.max, math_ops.reduce_max(alpha + off_diag_abs_row_sum))
        lambda_est_min = math_ops.maximum(
            finfo.min, math_ops.reduce_min(alpha - off_diag_abs_row_sum))
        # Upper bound on 2-norm of T.
        t_norm = math_ops.maximum(
            math_ops.abs(lambda_est_min), math_ops.abs(lambda_est_max))

        # Compute the smallest allowed pivot in the Sturm sequence to avoid
        # overflow.
        one = np.ones([], dtype=alpha.dtype.as_numpy_dtype)
        safemin = np.maximum(one / finfo.max, (one + finfo.eps) * finfo.tiny)
        pivmin = safemin * math_ops.maximum(one, math_ops.reduce_max(beta_sq))
        alpha0_perturbation = math_ops.square(finfo.eps * beta_abs[0])
        abs_tol = finfo.eps * t_norm
        # NOTE(review): `if tol:` treats `tol=0` the same as `tol=None`
        # (truthiness check), i.e. the machine-precision default is used.
        if tol:
          abs_tol = math_ops.maximum(tol, abs_tol)
        # In the worst case, when the absolute tolerance is eps*lambda_est_max
        # and lambda_est_max = -lambda_est_min, we have to take as many
        # bisection steps as there are bits in the mantissa plus 1.
        max_it = finfo.nmant + 1

        # Determine the indices of the desired eigenvalues, based on select
        # and select_range.
        asserts = None
        if select == 'a':
          target_counts = math_ops.range(n)
        elif select == 'i':
          asserts = check_ops.assert_less_equal(
              select_range[0],
              select_range[1],
              message='Got empty index range in select_range.')
          target_counts = math_ops.range(select_range[0], select_range[1] + 1)
        elif select == 'v':
          # For select == 'v' the target indices are derived below from
          # Sturm counts at the interval endpoints.
          asserts = check_ops.assert_less(
              select_range[0],
              select_range[1],
              message='Got empty interval in select_range.')
        else:
          # NOTE(review): the message string has a stray leading apostrophe;
          # left unchanged here since it is runtime behavior.
          raise ValueError("'select must have a value in {'a', 'i', 'v'}.")

        if asserts:
          with ops.control_dependencies([asserts]):
            alpha = array_ops.identity(alpha)

        # Run binary search for all desired eigenvalues in parallel, starting
        # from an interval slightly wider than the estimated
        # [lambda_est_min, lambda_est_max].
        fudge = 2.1  # We widen starting interval the Gershgorin interval a bit.
        norm_slack = math_ops.cast(n, alpha.dtype) * fudge * finfo.eps * t_norm
        if select in {'a', 'i'}:
          lower = lambda_est_min - norm_slack - 2 * fudge * pivmin
          upper = lambda_est_max + norm_slack + fudge * pivmin
        else:
          # Count the number of eigenvalues in the given range.
          lower = select_range[0] - norm_slack - 2 * fudge * pivmin
          upper = select_range[1] + norm_slack + fudge * pivmin
          first = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, lower)
          last = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, upper)
          target_counts = math_ops.range(first, last)

        # Pre-broadcast the scalars used in the Sturm sequence for improved
        # performance.
        upper = math_ops.minimum(upper, finfo.max)
        lower = math_ops.maximum(lower, finfo.min)
        target_shape = array_ops.shape(target_counts)
        lower = array_ops.broadcast_to(lower, shape=target_shape)
        upper = array_ops.broadcast_to(upper, shape=target_shape)
        pivmin = array_ops.broadcast_to(pivmin, target_shape)
        alpha0_perturbation = array_ops.broadcast_to(alpha0_perturbation,
                                                     target_shape)

        # We compute the midpoint as 0.5*lower + 0.5*upper to avoid overflow in
        # (lower + upper) or (upper - lower) when the matrix has eigenvalues
        # with magnitude greater than finfo.max / 2.
        def midpoint(lower, upper):
          return (0.5 * lower) + (0.5 * upper)

        def continue_binary_search(i, lower, upper):
          return math_ops.logical_and(
              math_ops.less(i, max_it),
              math_ops.less(abs_tol, math_ops.reduce_max(upper - lower)))

        def binary_search_step(i, lower, upper):
          mid = midpoint(lower, upper)
          counts = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, mid)
          # Narrow each bracket toward the eigenvalue whose index (Sturm
          # count) equals the corresponding target count.
          lower = array_ops.where(counts <= target_counts, mid, lower)
          upper = array_ops.where(counts > target_counts, mid, upper)
          return i + 1, lower, upper

        # Start parallel binary searches.
        _, lower, upper = while_loop.while_loop(continue_binary_search,
                                                binary_search_step,
                                                [0, lower, upper])
        return midpoint(lower, upper)

    def _compute_eigenvectors(alpha, beta, eigvals):
      """Implements inverse iteration to compute eigenvectors."""
      with ops.name_scope('compute_eigenvectors'):
        k = array_ops.size(eigvals)
        n = array_ops.size(alpha)
        alpha = math_ops.cast(alpha, dtype=beta.dtype)

        # Eigenvectors corresponding to cluster of close eigenvalues are
        # not unique and need to be explicitly orthogonalized. Here we
        # identify such clusters. Note: This function assumes that
        # eigenvalues are sorted in non-decreasing order.
        gap = eigvals[1:] - eigvals[:-1]
        eps = np.finfo(eigvals.dtype.as_numpy_dtype).eps
        t_norm = math_ops.maximum(
            math_ops.abs(eigvals[0]), math_ops.abs(eigvals[-1]))
        gaptol = np.sqrt(eps) * t_norm
        # Find the beginning and end of runs of eigenvectors corresponding
        # to eigenvalues closer than "gaptol", which will need to be
        # orthogonalized against each other.
        close = math_ops.less(gap, gaptol)
        left_neighbor_close = array_ops.concat([[False], close], axis=0)
        right_neighbor_close = array_ops.concat([close, [False]], axis=0)
        ortho_interval_start = math_ops.logical_and(
            math_ops.logical_not(left_neighbor_close), right_neighbor_close)
        ortho_interval_start = array_ops.squeeze(
            array_ops.where_v2(ortho_interval_start), axis=-1)
        ortho_interval_end = math_ops.logical_and(
            left_neighbor_close, math_ops.logical_not(right_neighbor_close))
        ortho_interval_end = array_ops.squeeze(
            array_ops.where_v2(ortho_interval_end), axis=-1) + 1
        num_clusters = array_ops.size(ortho_interval_end)

        # We perform inverse iteration for all eigenvectors in parallel,
        # starting from a random set of vectors, until all have converged.
        # The fixed seed makes the starting vectors deterministic.
        v0 = math_ops.cast(
            stateless_random_ops.stateless_random_normal(
                shape=(k, n), seed=[7, 42]),
            dtype=beta.dtype)
        nrm_v = norm(v0, axis=1)
        v0 = v0 / nrm_v[:, array_ops.newaxis]
        zero_nrm = constant_op.constant(0, shape=nrm_v.shape, dtype=nrm_v.dtype)

        # Replicate alpha-eigvals(ik) and beta across the k eigenvectors so we
        # can solve the k systems
        #    [T - eigvals(i)*eye(n)] x_i = r_i
        # simultaneously using the batching mechanism.
        eigvals_cast = math_ops.cast(eigvals, dtype=beta.dtype)
        alpha_shifted = (
            alpha[array_ops.newaxis, :] - eigvals_cast[:, array_ops.newaxis])
        beta = array_ops.tile(beta[array_ops.newaxis, :], [k, 1])
        diags = [beta, alpha_shifted, math_ops.conj(beta)]

        def orthogonalize_close_eigenvectors(eigenvectors):
          # Eigenvectors corresponding to a cluster of close eigenvalues are
          # not uniquely defined, but the subspace they span is. To avoid
          # numerical instability, we explicitly mutually orthogonalize such
          # eigenvectors after each step of inverse iteration. It is customary
          # to use modified Gram-Schmidt for this, but this is not very
          # efficient on some platforms, so here we defer to the QR
          # decomposition in TensorFlow.
          def orthogonalize_cluster(cluster_idx, eigenvectors):
            start = ortho_interval_start[cluster_idx]
            end = ortho_interval_end[cluster_idx]
            update_indices = array_ops.expand_dims(
                math_ops.range(start, end), -1)
            vectors_in_cluster = eigenvectors[start:end, :]
            # We use the builtin QR factorization to orthonormalize the
            # vectors in the cluster.
            q, _ = qr(transpose(vectors_in_cluster))
            vectors_to_update = transpose(q)
            eigenvectors = array_ops.tensor_scatter_nd_update(
                eigenvectors, update_indices, vectors_to_update)
            return cluster_idx + 1, eigenvectors

          _, eigenvectors = while_loop.while_loop(
              lambda i, ev: math_ops.less(i, num_clusters),
              orthogonalize_cluster, [0, eigenvectors])
          return eigenvectors

        def continue_iteration(i, _, nrm_v, nrm_v_old):
          max_it = 5  # Taken from LAPACK xSTEIN.
          min_norm_growth = 0.1
          norm_growth_factor = constant_op.constant(
              1 + min_norm_growth, dtype=nrm_v.dtype)
          # We stop the inverse iteration when we reach the maximum number of
          # iterations or the norm growths is less than 10%.
          return math_ops.logical_and(
              math_ops.less(i, max_it),
              math_ops.reduce_any(
                  math_ops.greater_equal(
                      math_ops.real(nrm_v),
                      math_ops.real(norm_growth_factor * nrm_v_old))))

        def inverse_iteration_step(i, v, nrm_v, nrm_v_old):
          # One inverse-iteration sweep: solve the shifted tridiagonal
          # systems, renormalize, and reorthogonalize clustered vectors.
          v = tridiagonal_solve(
              diags,
              v,
              diagonals_format='sequence',
              partial_pivoting=True,
              perturb_singular=True)
          nrm_v_old = nrm_v
          nrm_v = norm(v, axis=1)
          v = v / nrm_v[:, array_ops.newaxis]
          v = orthogonalize_close_eigenvectors(v)
          return i + 1, v, nrm_v, nrm_v_old

        _, v, nrm_v, _ = while_loop.while_loop(continue_iteration,
                                               inverse_iteration_step,
                                               [0, v0, nrm_v, zero_nrm])
        # Eigenvectors are returned as columns.
        return transpose(v)

    alpha = ops.convert_to_tensor(alpha, name='alpha')
    n = alpha.shape[0]
    if n <= 1:
      # NOTE(review): for n <= 1 only the (real) diagonal is returned even
      # when `eigvals_only=False`; no eigenvector is produced in that case.
      return math_ops.real(alpha)
    beta = ops.convert_to_tensor(beta, name='beta')

    if alpha.dtype != beta.dtype:
      raise ValueError("'alpha' and 'beta' must have the same type.")

    eigvals = _compute_eigenvalues(alpha, beta)
    if eigvals_only:
      return eigvals

    eigvectors = _compute_eigenvectors(alpha, beta, eigvals)
    return eigvals, eigvectors
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator.py ADDED
@@ -0,0 +1,1693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Base class for linear operators."""
16
+
17
+ import abc
18
+ import contextlib
19
+
20
+ import numpy as np
21
+
22
+ from tensorflow.python.framework import composite_tensor
23
+ from tensorflow.python.framework import composite_tensor_gradient
24
+ from tensorflow.python.framework import dtypes
25
+ from tensorflow.python.framework import ops
26
+ from tensorflow.python.framework import tensor_conversion
27
+ from tensorflow.python.framework import tensor_shape
28
+ from tensorflow.python.framework import tensor_spec
29
+ from tensorflow.python.framework import tensor_util
30
+ from tensorflow.python.framework import type_spec
31
+ from tensorflow.python.framework import type_spec_registry
32
+ from tensorflow.python.module import module
33
+ from tensorflow.python.ops import array_ops
34
+ from tensorflow.python.ops import check_ops
35
+ from tensorflow.python.ops import linalg_ops
36
+ from tensorflow.python.ops import math_ops
37
+ from tensorflow.python.ops import resource_variable_ops
38
+ from tensorflow.python.ops import variables
39
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
40
+ from tensorflow.python.ops.linalg import linear_operator_util
41
+ from tensorflow.python.ops.linalg import property_hint_util
42
+ from tensorflow.python.ops.linalg import slicing
43
+ from tensorflow.python.platform import tf_logging as logging
44
+ from tensorflow.python.trackable import data_structures
45
+ from tensorflow.python.util import deprecation
46
+ from tensorflow.python.util import dispatch
47
+ from tensorflow.python.util import nest
48
+ from tensorflow.python.util import variable_utils
49
+ from tensorflow.python.util.tf_export import tf_export
50
+
51
+
52
+ __all__ = ["LinearOperator"]
53
+
54
+
55
+ # pylint: disable=protected-access
56
+ class _LinearOperatorGradient(
57
+ composite_tensor_gradient.CompositeTensorGradient):
58
+ """Composite tensor gradient for `LinearOperator`."""
59
+
60
+ def get_gradient_components(self, value):
61
+ return value._type_spec._to_components(value)
62
+
63
+ def replace_gradient_components(self, value, components):
64
+ flat_components = nest.flatten(components)
65
+
66
+ # If all component gradients are disconnected, return None.
67
+ if all(c is None for c in flat_components):
68
+ return None
69
+
70
+ # TODO(b/286565628): Update this once `CompositeTensorGradient` fully
71
+ # supports `tf.UnconnectedGradients.ZERO`.
72
+ # Replace individual disconnected component gradients with zeros.
73
+ value_components = value._type_spec._to_components(value)
74
+ flat_grad_components = []
75
+ for gc, vc in zip(flat_components, nest.flatten(value_components)):
76
+ if gc is None:
77
+ flat_grad_components.append(
78
+ nest.map_structure(
79
+ lambda x: array_ops.zeros_like(x, dtype=value.dtype),
80
+ vc,
81
+ expand_composites=True))
82
+ else:
83
+ flat_grad_components.append(gc)
84
+ grad_components = nest.pack_sequence_as(
85
+ value_components, flat_grad_components)
86
+ return value._type_spec._from_components(grad_components)
87
+ # pylint: enable=protected-access
88
+
89
+
90
+ # TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
91
+ @tf_export("linalg.LinearOperator")
92
+ class LinearOperator(
93
+ module.Module, composite_tensor.CompositeTensor, metaclass=abc.ABCMeta):
94
+ """Base class defining a [batch of] linear operator[s].
95
+
96
+ Subclasses of `LinearOperator` provide access to common methods on a
97
+ (batch) matrix, without the need to materialize the matrix. This allows:
98
+
99
+ * Matrix free computations
100
+ * Operators that take advantage of special structure, while providing a
101
+ consistent API to users.
102
+
103
+ #### Subclassing
104
+
105
+ To enable a public method, subclasses should implement the leading-underscore
106
+ version of the method. The argument signature should be identical except for
107
+ the omission of `name="..."`. For example, to enable
108
+ `matmul(x, adjoint=False, name="matmul")` a subclass should implement
109
+ `_matmul(x, adjoint=False)`.
110
+
111
+ #### Performance contract
112
+
113
+ Subclasses should only implement the assert methods
114
+ (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
115
+ time.
116
+
117
+ Class docstrings should contain an explanation of computational complexity.
118
+ Since this is a high-performance library, attention should be paid to detail,
119
+ and explanations can include constants as well as Big-O notation.
120
+
121
+ #### Shape compatibility
122
+
123
+ `LinearOperator` subclasses should operate on a [batch] matrix with
124
+ compatible shape. Class docstrings should define what is meant by compatible
125
+ shape. Some subclasses may not support batching.
126
+
127
+ Examples:
128
+
129
+ `x` is a batch matrix with compatible shape for `matmul` if
130
+
131
+ ```
132
+ operator.shape = [B1,...,Bb] + [M, N], b >= 0,
133
+ x.shape = [B1,...,Bb] + [N, R]
134
+ ```
135
+
136
+ `rhs` is a batch matrix with compatible shape for `solve` if
137
+
138
+ ```
139
+ operator.shape = [B1,...,Bb] + [M, N], b >= 0,
140
+ rhs.shape = [B1,...,Bb] + [M, R]
141
+ ```
142
+
143
+ #### Example docstring for subclasses.
144
+
145
+ This operator acts like a (batch) matrix `A` with shape
146
+ `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
147
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
148
+ an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
149
+ purposes of identifying and working with compatible arguments the shape is
150
+ relevant.
151
+
152
+ Examples:
153
+
154
+ ```python
155
+ some_tensor = ... shape = ????
156
+ operator = MyLinOp(some_tensor)
157
+
158
+ operator.shape()
159
+ ==> [2, 4, 4]
160
+
161
+ operator.log_abs_determinant()
162
+ ==> Shape [2] Tensor
163
+
164
+ x = ... Shape [2, 4, 5] Tensor
165
+
166
+ operator.matmul(x)
167
+ ==> Shape [2, 4, 5] Tensor
168
+ ```
169
+
170
+ #### Shape compatibility
171
+
172
+ This operator acts on batch matrices with compatible shape.
173
+ FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
174
+
175
+ #### Performance
176
+
177
+ FILL THIS IN
178
+
179
+ #### Matrix property hints
180
+
181
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
182
+ for `X = non_singular, self_adjoint, positive_definite, square`.
183
+ These have the following meaning:
184
+
185
+ * If `is_X == True`, callers should expect the operator to have the
186
+ property `X`. This is a promise that should be fulfilled, but is *not* a
187
+ runtime assert. For example, finite floating point precision may result
188
+ in these promises being violated.
189
+ * If `is_X == False`, callers should expect the operator to not have `X`.
190
+ * If `is_X == None` (the default), callers should have no expectation either
191
+ way.
192
+
193
+ #### Initialization parameters
194
+
195
+ All subclasses of `LinearOperator` are expected to pass a `parameters`
196
+ argument to `super().__init__()`. This should be a `dict` containing
197
+ the unadulterated arguments passed to the subclass `__init__`. For example,
198
+ `MyLinearOperator` with an initializer should look like:
199
+
200
+ ```python
201
+ def __init__(self, operator, is_square=False, name=None):
202
+ parameters = dict(
203
+ operator=operator,
204
+ is_square=is_square,
205
+ name=name
206
+ )
207
+ ...
208
+ super().__init__(..., parameters=parameters)
209
+ ```
210
+
211
+ Users can then access `my_linear_operator.parameters` to see all arguments
212
+ passed to its initializer.
213
+ """
214
+
215
  # TODO(b/143910018) Remove graph_parents in V3.
  @deprecation.deprecated_args(None, "Do not pass `graph_parents`. They will "
                               " no longer be used.", "graph_parents")
  def __init__(self,
               dtype,
               graph_parents=None,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None,
               parameters=None):
    """Initialize the `LinearOperator`.

    **This is a private method for subclass use.**
    **Subclasses should copy-paste this `__init__` documentation.**

    Args:
      dtype: The type of this `LinearOperator`. Arguments to `matmul` and
        `solve` will have to be this type.
      graph_parents: (Deprecated) Python list of graph prerequisites of this
        `LinearOperator` Typically tensors that are passed during
        initialization.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. If `dtype` is real, this is equivalent to being symmetric.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.
      parameters: Python `dict` of parameters used to instantiate this
        `LinearOperator`.

    Raises:
      ValueError: If any member of graph_parents is `None` or not a `Tensor`.
      ValueError: If hints are set incorrectly.
    """
    # Check and auto-set flags. The hints form an implication chain:
    # positive-definite => non-singular => square, and self-adjoint => square.
    # Each check raises if the caller explicitly contradicted an implication;
    # the order of the checks determines which error is reported first.
    if is_positive_definite:
      if is_non_singular is False:
        raise ValueError("A positive definite matrix is always non-singular.")
      is_non_singular = True

    if is_non_singular:
      if is_square is False:
        raise ValueError("A non-singular matrix is always square.")
      is_square = True

    if is_self_adjoint:
      if is_square is False:
        raise ValueError("A self-adjoint matrix is always square.")
      is_square = True

    # May be True/False (set or implied above) or None (unknown); `is_square`
    # (the property) falls back to a static shape check when this is None.
    self._is_square_set_or_implied_by_hints = is_square

    if graph_parents is not None:
      # Deprecated path; validates and stores the provided parents.
      self._set_graph_parents(graph_parents)
    else:
      self._graph_parents = []
    # Canonicalize to the base (non-reference) dtype; a falsy `dtype`
    # (e.g. None) is stored unchanged.
    self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype
    self._is_non_singular = is_non_singular
    self._is_self_adjoint = is_self_adjoint
    self._is_positive_definite = is_positive_definite
    # `_no_dependency` prevents the parameters dict from being tracked as a
    # checkpointable dependency of this Module.
    self._parameters = self._no_dependency(parameters)
    self._parameters_sanitized = False
    self._name = name or type(self).__name__
283
+
284
+ @contextlib.contextmanager
285
+ def _name_scope(self, name=None): # pylint: disable=method-hidden
286
+ """Helper function to standardize op scope."""
287
+ full_name = self.name
288
+ if name is not None:
289
+ full_name += "/" + name
290
+ with ops.name_scope(full_name) as scope:
291
+ yield scope
292
+
293
+ @property
294
+ def parameters(self):
295
+ """Dictionary of parameters used to instantiate this `LinearOperator`."""
296
+ return dict(self._parameters)
297
+
298
  @property
  def dtype(self):
    """The `DType` of `Tensor`s handled by this `LinearOperator`."""
    # Set in `__init__`; may be `None` if no dtype was supplied there.
    return self._dtype
302
+
303
  @property
  def name(self):
    """Name prepended to all ops created by this `LinearOperator`."""
    # Defaults to the subclass name when no `name` was passed to `__init__`.
    return self._name
307
+
308
  @property
  @deprecation.deprecated(None, "Do not call `graph_parents`.")
  def graph_parents(self):
    """List of graph dependencies of this `LinearOperator` (deprecated)."""
    # Empty list unless the deprecated `graph_parents` arg was passed to
    # `__init__`.
    return self._graph_parents
313
+
314
  @property
  def is_non_singular(self):
    """Non-singularity hint set at construction; `None` means unknown."""
    return self._is_non_singular
317
+
318
  @property
  def is_self_adjoint(self):
    """Self-adjointness hint set at construction; `None` means unknown."""
    return self._is_self_adjoint
321
+
322
  @property
  def is_positive_definite(self):
    """Positive-definiteness hint set at construction; `None` means unknown."""
    return self._is_positive_definite
325
+
326
  @property
  def is_square(self):
    """Return `True/False` depending on if this operator is square."""
    # Static checks done after __init__.  Why?  Because domain/range dimension
    # sometimes requires lots of work done in the derived class after init.
    auto_square_check = self.domain_dimension == self.range_dimension
    # A user hint of False combined with statically-square dimensions is a
    # contradiction and is rejected eagerly.
    if self._is_square_set_or_implied_by_hints is False and auto_square_check:
      raise ValueError(
          "User set is_square hint to False, but the operator was square.")
    # No hint at all: fall back to the static dimension comparison.
    if self._is_square_set_or_implied_by_hints is None:
      return auto_square_check

    return self._is_square_set_or_implied_by_hints
339
+
340
  @abc.abstractmethod
  def _shape(self):
    """Static `TensorShape` of this operator; must be implemented by subclasses."""
    # Write this in derived class to enable all static shape methods.
    raise NotImplementedError("_shape is not implemented.")
344
+
345
  @property
  def shape(self):
    """`TensorShape` of this `LinearOperator`.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.shape`.

    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    # Delegates to the subclass-implemented abstract `_shape`.
    return self._shape()
357
+
358
  def _shape_tensor(self):
    """Dynamic shape of the operator; overridden by derived classes."""
    # This is not an abstractmethod, since we want derived classes to be able to
    # override this with optional kwargs, which can reduce the number of
    # `convert_to_tensor` calls.  See derived classes for examples.
    raise NotImplementedError("_shape_tensor is not implemented.")
363
+
364
  def shape_tensor(self, name="shape_tensor"):
    """Shape of this `LinearOperator`, determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      # Prefer to use statically defined shape if available.
      if self.shape.is_fully_defined():
        # Static path avoids running ops at all.
        return linear_operator_util.shape_tensor(self.shape.as_list())
      else:
        return self._shape_tensor()
383
+
384
  @property
  def batch_shape(self):
    """`TensorShape` of batch dimensions of this `LinearOperator`.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]`

    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    # Derived classes get this "for free" once .shape is implemented.
    # Dropping the trailing two (matrix) dims leaves the batch dims.
    return self.shape[:-2]
397
+
398
  def batch_shape_tensor(self, name="batch_shape_tensor"):
    """Shape of batch dimensions of this operator, determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb]`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._batch_shape_tensor()
414
+
415
+ def _batch_shape_tensor(self, shape=None):
416
+ # `shape` may be passed in if this can be pre-computed in a
417
+ # more efficient manner, e.g. without excessive Tensor conversions.
418
+ if self.batch_shape.is_fully_defined():
419
+ return linear_operator_util.shape_tensor(
420
+ self.batch_shape.as_list(), name="batch_shape")
421
+ else:
422
+ shape = self.shape_tensor() if shape is None else shape
423
+ return shape[:-2]
424
+
425
  @property
  def tensor_rank(self, name="tensor_rank"):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.

    Args:
      name:  A name for this `Op`.

    Returns:
      Python integer, or None if the tensor rank is undefined.
    """
    # NOTE(review): since this is a @property, callers can never pass `name`;
    # the default scope name is always used.
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):  # pylint: disable=not-callable
      return self.shape.ndims
441
+
442
  def tensor_rank_tensor(self, name="tensor_rank_tensor"):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`, determined at runtime.
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._tensor_rank_tensor()
457
+
458
+ def _tensor_rank_tensor(self, shape=None):
459
+ # `shape` may be passed in if this can be pre-computed in a
460
+ # more efficient manner, e.g. without excessive Tensor conversions.
461
+ if self.tensor_rank is not None:
462
+ return tensor_conversion.convert_to_tensor_v2_with_dispatch(
463
+ self.tensor_rank
464
+ )
465
+ else:
466
+ shape = self.shape_tensor() if shape is None else shape
467
+ return array_ops.size(shape)
468
+
469
  @property
  def domain_dimension(self):
    """Dimension (in the sense of vector spaces) of the domain of this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.

    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    # Unknown rank yields an unknown Dimension rather than an index error.
    if self.shape.rank is None:
      return tensor_shape.Dimension(None)
    else:
      return self.shape.dims[-1]
484
+
485
  def domain_dimension_tensor(self, name="domain_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the domain of this operator.

    Determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._domain_dimension_tensor()
502
+
503
+ def _domain_dimension_tensor(self, shape=None):
504
+ # `shape` may be passed in if this can be pre-computed in a
505
+ # more efficient manner, e.g. without excessive Tensor conversions.
506
+ dim_value = tensor_shape.dimension_value(self.domain_dimension)
507
+ if dim_value is not None:
508
+ return tensor_conversion.convert_to_tensor_v2_with_dispatch(dim_value)
509
+ else:
510
+ shape = self.shape_tensor() if shape is None else shape
511
+ return shape[-1]
512
+
513
  @property
  def range_dimension(self):
    """Dimension (in the sense of vector spaces) of the range of this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.

    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    # Truthiness check: `dims` may be None when the rank is unknown, in which
    # case an unknown Dimension is returned instead of indexing.
    if self.shape.dims:
      return self.shape.dims[-2]
    else:
      return tensor_shape.Dimension(None)
528
+
529
  def range_dimension_tensor(self, name="range_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the range of this operator.

    Determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._range_dimension_tensor()
546
+
547
+ def _range_dimension_tensor(self, shape=None):
548
+ # `shape` may be passed in if this can be pre-computed in a
549
+ # more efficient manner, e.g. without excessive Tensor conversions.
550
+ dim_value = tensor_shape.dimension_value(self.range_dimension)
551
+ if dim_value is not None:
552
+ return tensor_conversion.convert_to_tensor_v2_with_dispatch(dim_value)
553
+ else:
554
+ shape = self.shape_tensor() if shape is None else shape
555
+ return shape[-2]
556
+
557
  def _assert_non_singular(self):
    """Private default implementation of _assert_non_singular."""
    logging.warn(
        "Using (possibly slow) default implementation of assert_non_singular."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      # Positive definite implies non-singular; reuse that check.
      return self.assert_positive_definite()
    else:
      singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
      # TODO(langmore) Add .eig and .cond as methods.
      # Condition number = largest singular value / smallest singular value.
      cond = (math_ops.reduce_max(singular_values, axis=-1) /
              math_ops.reduce_min(singular_values, axis=-1))
      return check_ops.assert_less(
          cond,
          self._max_condition_number_to_be_non_singular(),
          message="Singular matrix up to precision epsilon.")
573
+
574
  def _max_condition_number_to_be_non_singular(self):
    """Return the maximum condition number that we consider nonsingular."""
    with ops.name_scope("max_nonsingular_condition_number"):
      # Threshold scales with both matrix size and dtype machine epsilon.
      dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
      eps = math_ops.cast(
          math_ops.reduce_max([
              100.,
              math_ops.cast(self.range_dimension_tensor(), self.dtype),
              math_ops.cast(self.domain_dimension_tensor(), self.dtype)
          ]), self.dtype) * dtype_eps
      return 1. / eps
585
+
586
  def assert_non_singular(self, name="assert_non_singular"):
    """Returns an `Op` that asserts this operator is non singular.

    This operator is considered non-singular if

    ```
    ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
    eps := np.finfo(self.dtype.as_numpy_dtype).eps
    ```

    Args:
      name:  A string name to prepend to created ops.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is singular.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._assert_non_singular()
605
+
606
  def _assert_positive_definite(self):
    """Default implementation of _assert_positive_definite."""
    logging.warn(
        "Using (possibly slow) default implementation of "
        "assert_positive_definite."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    # If the operator is self-adjoint, then checking that
    # Cholesky decomposition succeeds + results in positive diag is necessary
    # and sufficient.
    if self.is_self_adjoint:
      return check_ops.assert_positive(
          array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
          message="Matrix was not positive definite.")
    # We have no generic check for positive definite.
    raise NotImplementedError("assert_positive_definite is not implemented.")
621
+
622
  def assert_positive_definite(self, name="assert_positive_definite"):
    """Returns an `Op` that asserts this operator is positive definite.

    Here, positive definite means that the quadratic form `x^H A x` has positive
    real part for all nonzero `x`.  Note that we do not require the operator to
    be self-adjoint to be positive definite.

    Args:
      name:  A name to give this `Op`.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is not positive definite.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._assert_positive_definite()
638
+
639
  def _assert_self_adjoint(self):
    """Default implementation: compare the dense matrix to its adjoint."""
    dense = self.to_dense()
    logging.warn(
        "Using (possibly slow) default implementation of assert_self_adjoint."
        "  Requires conversion to a dense matrix.")
    return check_ops.assert_equal(
        dense,
        linalg.adjoint(dense),
        message="Matrix was not equal to its adjoint.")
648
+
649
  def assert_self_adjoint(self, name="assert_self_adjoint"):
    """Returns an `Op` that asserts this operator is self-adjoint.

    Here we check that this operator is *exactly* equal to its hermitian
    transpose.

    Args:
      name:  A string name to prepend to created ops.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is not self-adjoint.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._assert_self_adjoint()
664
+
665
+ def _check_input_dtype(self, arg):
666
+ """Check that arg.dtype == self.dtype."""
667
+ if arg.dtype.base_dtype != self.dtype:
668
+ raise TypeError(
669
+ "Expected argument to have dtype %s. Found: %s in tensor %s" %
670
+ (self.dtype, arg.dtype, arg))
671
+
672
  @abc.abstractmethod
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    """Matrix-multiply `x` by this operator; implemented by subclasses."""
    raise NotImplementedError("_matmul is not implemented.")
675
+
676
  def matmul(
      self,
      x,
      adjoint=False,
      adjoint_arg=False,
      name="matmul",
  ):
    """Transform [batch] matrix `x` with left multiplication:  `x --> Ax`.

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    X = ... # shape [..., N, R], batch matrix, R > 0.

    Y = operator.matmul(X)
    Y.shape
    ==> [..., M, R]

    Y[..., :, r] = sum_j A[..., :, j] X[j, r]
    ```

    Args:
      x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
        `self`. See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.
      adjoint_arg:  Python `bool`.  If `True`, compute `A x^H` where `x^H` is
        the hermitian transpose (transposition and complex conjugation).
      name:  A name for this `Op`.

    Returns:
      A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
        as `self`.
    """
    if isinstance(x, LinearOperator):
      # Operator-operator product: compose rather than densify.
      left_operator = self.adjoint() if adjoint else self
      right_operator = x.adjoint() if adjoint_arg else x

      # Static compatibility check on the contracted dimensions.
      if (right_operator.range_dimension is not None and
          left_operator.domain_dimension is not None and
          right_operator.range_dimension != left_operator.domain_dimension):
        raise ValueError(
            "Operators are incompatible. Expected `x` to have dimension"
            " {} but got {}.".format(
                left_operator.domain_dimension, right_operator.range_dimension))

      with self._name_scope(name):  # pylint: disable=not-callable
        return self._linop_matmul(left_operator, right_operator)

    with self._name_scope(name):  # pylint: disable=not-callable
      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)

      # Which static dims must agree depends on which sides are adjointed.
      self_dim = -2 if adjoint else -1
      arg_dim = -1 if adjoint_arg else -2
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(
              x.shape[arg_dim])

      return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
737
+
738
  def _linop_matmul(
      self, left_operator: "LinearOperator", right_operator: "LinearOperator"
  ) -> "LinearOperator":
    """Compose two `LinearOperator`s as an operator product."""
    # instance of linear_operator_identity.LinearOperatorIdentity
    # (duck-typed via its private `_ones_diag` attribute to avoid the import).
    if hasattr(right_operator, "_ones_diag") and not hasattr(
        right_operator, "multiplier"
    ):
      # A @ I == A.
      return left_operator

    # instance of linear_operator_zeros.LinearOperatorZeros
    elif hasattr(right_operator, "_zeros_diag"):
      if not right_operator.is_square or not left_operator.is_square:
        raise ValueError(
            "Matmul with non-square `LinearOperator`s or "
            "non-square `LinearOperatorZeros` not supported at this time."
        )
      # A @ 0 == 0.
      return right_operator

    else:
      # Generic matmul of two `LinearOperator`s.
      is_square = property_hint_util.is_square(left_operator, right_operator)
      is_non_singular = None
      is_self_adjoint = None
      is_positive_definite = None

      if is_square:
        is_non_singular = property_hint_util.combined_non_singular_hint(
            left_operator, right_operator
        )
      # is_square can be None, so the explicit check for False is needed.
      elif is_square is False:  # pylint:disable=g-bool-id-comparison
        is_non_singular = False
        is_self_adjoint = False
        is_positive_definite = False

      # LinearOperator outputs a LinearOperatorComposition instance, which
      # inherits from LinearOperator. The inline import is necessary to avoid
      # errors due to this cyclic dependency.
      from tensorflow.python.ops.linalg import linear_operator_composition  # pylint: disable=g-import-not-at-top

      return linear_operator_composition.LinearOperatorComposition(
          operators=[left_operator, right_operator],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
      )
785
+
786
  def __matmul__(self, other):
    """Python `@` operator; delegates to `matmul`."""
    return self.matmul(other)
788
+
789
+ def _matvec(self, x, adjoint=False):
790
+ x_mat = array_ops.expand_dims(x, axis=-1)
791
+ y_mat = self.matmul(x_mat, adjoint=adjoint)
792
+ return array_ops.squeeze(y_mat, axis=-1)
793
+
794
  def matvec(self, x, adjoint=False, name="matvec"):
    """Transform [batch] vector `x` with left multiplication:  `x --> Ax`.

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)

    X = ... # shape [..., N], batch vector

    Y = operator.matvec(X)
    Y.shape
    ==> [..., M]

    Y[..., :] = sum_j A[..., :, j] X[..., j]
    ```

    Args:
      x: `Tensor` with compatible shape and same `dtype` as `self`.
        `x` is treated as a [batch] vector meaning for every set of leading
        dimensions, the last dimension defines a vector.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.
      name:  A name for this `Op`.

    Returns:
      A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)
      # The vector's last dim must match the operator's contracted dim.
      self_dim = -2 if adjoint else -1
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(x.shape[-1])
      return self._matvec(x, adjoint=adjoint)
828
+
829
  def _determinant(self):
    """Default determinant via dense conversion."""
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      # Positive definite: determinant is exp(log|det|).
      return math_ops.exp(self.log_abs_determinant())
    return linalg_ops.matrix_determinant(self.to_dense())
836
+
837
  def determinant(self, name="det"):
    """Determinant for every batch member.

    Args:
      name:  A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError:  If `self.is_square` is `False`.
    """
    # Determinant is only defined for square matrices.
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._determinant()
855
+
856
  def _log_abs_determinant(self):
    """Default log|det| via dense conversion."""
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      # For A = L L^H, log|det A| = 2 * sum(log(diag(L))).
      diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
      return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
    _, log_abs_det = linalg.slogdet(self.to_dense())
    return log_abs_det
865
+
866
  def log_abs_determinant(self, name="log_abs_det"):
    """Log absolute value of determinant for every batch member.

    Args:
      name:  A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError:  If `self.is_square` is `False`.
    """
    # Determinant is only defined for square matrices.
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._log_abs_determinant()
884
+
885
  def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Solve by conversion to a dense matrix."""
    if self.is_square is False:  # pylint: disable=g-bool-id-comparison
      raise NotImplementedError(
          "Solve is not yet implemented for non-square operators.")
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    if self._can_use_cholesky():
      # Cholesky-based solve is both faster and more numerically stable.
      return linalg_ops.cholesky_solve(
          linalg_ops.cholesky(self.to_dense()), rhs)
    return linear_operator_util.matrix_solve_with_broadcast(
        self.to_dense(), rhs, adjoint=adjoint)
896
+
897
  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Default implementation of _solve."""
    logging.warn(
        "Using (possibly slow) default implementation of solve."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
903
+
904
  def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ... # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape.
        `rhs` is treated like a [batch] matrix meaning for every set of leading
        dimensions, the last two dimensions defines a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      adjoint_arg:  Python `bool`.  If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
    # Hints of singularity or non-squareness rule out an exact solve.
    if self.is_non_singular is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "be singular.")
    if self.is_square is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "not be square.")
    if isinstance(rhs, LinearOperator):
      # Operator rhs: return a composed operator A^-1 @ rhs.
      left_operator = self.adjoint() if adjoint else self
      right_operator = rhs.adjoint() if adjoint_arg else rhs

      # Static compatibility check on the contracted dimensions.
      if (right_operator.range_dimension is not None and
          left_operator.domain_dimension is not None and
          right_operator.range_dimension != left_operator.domain_dimension):
        raise ValueError(
            "Operators are incompatible. Expected `rhs` to have dimension"
            " {} but got {}.".format(
                left_operator.domain_dimension, right_operator.range_dimension))
      with self._name_scope(name):  # pylint: disable=not-callable
        return self._linop_solve(left_operator, right_operator)

    with self._name_scope(name):  # pylint: disable=not-callable
      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)

      # Which static dims must agree depends on which sides are adjointed.
      self_dim = -1 if adjoint else -2
      arg_dim = -1 if adjoint_arg else -2
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(
              rhs.shape[arg_dim])

      return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
980
+
981
  def _linop_solve(
      self, left_operator: "LinearOperator", right_operator: "LinearOperator"
  ) -> "LinearOperator":
    """Solve one `LinearOperator` against another, returning an operator."""
    # instance of linear_operator_identity.LinearOperatorIdentity
    # (duck-typed via its private `_ones_diag` attribute to avoid the import).
    if hasattr(right_operator, "_ones_diag") and not hasattr(
        right_operator, "multiplier"
    ):
      # Solving A X = I yields X = A^-1.
      return left_operator.inverse()

    # Generic solve of two `LinearOperator`s.
    is_square = property_hint_util.is_square(left_operator, right_operator)
    is_non_singular = None
    is_self_adjoint = None
    is_positive_definite = None

    if is_square:
      is_non_singular = property_hint_util.combined_non_singular_hint(
          left_operator, right_operator
      )
    # is_square can be None, so the explicit check for False is needed.
    elif is_square is False:  # pylint:disable=g-bool-id-comparison
      is_non_singular = False
      is_self_adjoint = False
      is_positive_definite = False

    # LinearOperator outputs a LinearOperatorComposition instance that contains
    # a LinearOperatorInversion instance, both of which
    # inherit from LinearOperator. The inline import is necessary to avoid
    # errors due to this cyclic dependency.
    from tensorflow.python.ops.linalg import linear_operator_composition  # pylint: disable=g-import-not-at-top
    from tensorflow.python.ops.linalg import linear_operator_inversion  # pylint: disable=g-import-not-at-top

    return linear_operator_composition.LinearOperatorComposition(
        operators=[
            linear_operator_inversion.LinearOperatorInversion(left_operator),
            right_operator,
        ],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
    )
1022
+
1023
+ def _solvevec(self, rhs, adjoint=False):
1024
+ """Default implementation of _solvevec."""
1025
+ rhs_mat = array_ops.expand_dims(rhs, axis=-1)
1026
+ solution_mat = self.solve(rhs_mat, adjoint=adjoint)
1027
+ return array_ops.squeeze(solution_mat, axis=-1)
1028
+
1029
  def solvevec(self, rhs, adjoint=False, name="solve"):
    """Solve single equation with best effort: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve one linear system for every member of the batch.
    RHS = ... # shape [..., M]

    X = operator.solvevec(RHS)
    # X is the solution to the linear system
    # sum_j A[..., :, j] X[..., j] = RHS[..., :]

    operator.matvec(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator.
        `rhs` is treated like a [batch] vector meaning for every set of leading
        dimensions, the last dimension defines a vector.  See class docstring
        for definition of compatibility regarding batch dimensions.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
    # NOTE(review): the default scope name is "solve" (not "solvevec"),
    # matching the historical behavior of this method.
    with self._name_scope(name):  # pylint: disable=not-callable
      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)
      # The vector's last dim must match the operator's contracted dim.
      self_dim = -1 if adjoint else -2
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])

      return self._solvevec(rhs, adjoint=adjoint)
1078
+
1079
  def adjoint(self, name: str = "adjoint") -> "LinearOperator":
    """Returns the adjoint of the current `LinearOperator`.

    Given `A` representing this `LinearOperator`, return `A*`.
    Note that calling `self.adjoint()` and `self.H` are equivalent.

    Args:
      name:  A name for this `Op`.

    Returns:
      `LinearOperator` which represents the adjoint of this `LinearOperator`.
    """
    # A self-adjoint operator is its own adjoint; avoid a wrapper object.
    if self.is_self_adjoint is True:  # pylint: disable=g-bool-id-comparison
      return self
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_adjoint()

  # self.H is equivalent to self.adjoint().
  H = property(adjoint, None)
1098
+
1099
  def _linop_adjoint(self) -> "LinearOperator":
    """Wrap `self` in a `LinearOperatorAdjoint`, propagating hints."""
    # Inline import avoids a cyclic module dependency.
    from tensorflow.python.ops.linalg import linear_operator_adjoint  # pylint: disable=g-import-not-at-top
    return linear_operator_adjoint.LinearOperatorAdjoint(
        self,
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=self.is_positive_definite,
        is_square=self.is_square)
1107
+
1108
  def inverse(self, name: str = "inverse") -> "LinearOperator":
    """Returns the Inverse of this `LinearOperator`.

    Given `A` representing this `LinearOperator`, return a `LinearOperator`
    representing `A^-1`.

    Args:
      name: A name scope to use for ops added by this method.

    Returns:
      `LinearOperator` representing inverse of this matrix.

    Raises:
      ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
    """
    # Only square, non-singular operators have an inverse.
    if self.is_square is False:  # pylint: disable=g-bool-id-comparison
      raise ValueError("Cannot take the Inverse: This operator represents "
                       "a non square matrix.")
    if self.is_non_singular is False:  # pylint: disable=g-bool-id-comparison
      raise ValueError("Cannot take the Inverse: This operator represents "
                       "a singular matrix.")

    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_inverse()
1132
+
1133
  def _linop_inverse(self) -> "LinearOperator":
    """Wrap `self` in a `LinearOperatorInversion`, propagating hints."""
    # The in-line import is necessary because linear_operator_inversion.py
    # depends on linear_operator.py. The in-line import works because the two
    # files are now in the same build target, but if the import were at the top
    # of the file there would be a partially-initialized module error caused by
    # the code cycle.
    from tensorflow.python.ops.linalg import linear_operator_inversion  # pylint: disable=g-import-not-at-top
    return linear_operator_inversion.LinearOperatorInversion(
        self,
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=self.is_positive_definite,
        is_square=self.is_square)
1146
+
1147
  def cholesky(self, name: str = "cholesky") -> "LinearOperator":
    """Returns a Cholesky factor as a `LinearOperator`.

    Given `A` representing this `LinearOperator`, if `A` is positive definite
    self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
    decomposition.

    Args:
      name:  A name for this `Op`.

    Returns:
      `LinearOperator` which represents the lower triangular matrix
      in the Cholesky decomposition.

    Raises:
      ValueError: When the `LinearOperator` is not hinted to be positive
        definite and self adjoint.
    """

    # `_can_use_cholesky` gates on the positive-definite/self-adjoint hints.
    if not self._can_use_cholesky():
      raise ValueError("Cannot take the Cholesky decomposition: "
                       "Not a positive definite self adjoint matrix.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_cholesky()
1171
+
1172
+ def _linop_cholesky(self) -> "LinearOperator":
1173
+ from tensorflow.python.ops.linalg import linear_operator_lower_triangular # pylint: disable=g-import-not-at-top
1174
+ return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
1175
+ linalg_ops.cholesky(self.to_dense()),
1176
+ is_non_singular=True,
1177
+ is_self_adjoint=False,
1178
+ is_square=True)
1179
+
1180
+ def _to_dense(self):
1181
+ """Generic and often inefficient implementation. Override often."""
1182
+ if self.batch_shape.is_fully_defined():
1183
+ batch_shape = self.batch_shape
1184
+ else:
1185
+ batch_shape = self.batch_shape_tensor()
1186
+
1187
+ dim_value = tensor_shape.dimension_value(self.domain_dimension)
1188
+ if dim_value is not None:
1189
+ n = dim_value
1190
+ else:
1191
+ n = self.domain_dimension_tensor()
1192
+
1193
+ eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
1194
+ return self.matmul(eye)
1195
+
1196
+ def to_dense(self, name="to_dense"):
1197
+ """Return a dense (batch) matrix representing this operator."""
1198
+ with self._name_scope(name): # pylint: disable=not-callable
1199
+ return self._to_dense()
1200
+
1201
+ def _diag_part(self):
1202
+ """Generic and often inefficient implementation. Override often."""
1203
+ return array_ops.matrix_diag_part(self.to_dense())
1204
+
1205
+ def diag_part(self, name="diag_part"):
1206
+ """Efficiently get the [batch] diagonal part of this operator.
1207
+
1208
+ If this operator has shape `[B1,...,Bb, M, N]`, this returns a
1209
+ `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
1210
+ `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
1211
+
1212
+ ```
1213
+ my_operator = LinearOperatorDiag([1., 2.])
1214
+
1215
+ # Efficiently get the diagonal
1216
+ my_operator.diag_part()
1217
+ ==> [1., 2.]
1218
+
1219
+ # Equivalent, but inefficient method
1220
+ tf.linalg.diag_part(my_operator.to_dense())
1221
+ ==> [1., 2.]
1222
+ ```
1223
+
1224
+ Args:
1225
+ name: A name for this `Op`.
1226
+
1227
+ Returns:
1228
+ diag_part: A `Tensor` of same `dtype` as self.
1229
+ """
1230
+ with self._name_scope(name): # pylint: disable=not-callable
1231
+ return self._diag_part()
1232
+
1233
+ def _trace(self):
1234
+ return math_ops.reduce_sum(self.diag_part(), axis=-1)
1235
+
1236
+ def trace(self, name="trace"):
1237
+ """Trace of the linear operator, equal to sum of `self.diag_part()`.
1238
+
1239
+ If the operator is square, this is also the sum of the eigenvalues.
1240
+
1241
+ Args:
1242
+ name: A name for this `Op`.
1243
+
1244
+ Returns:
1245
+ Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
1246
+ """
1247
+ with self._name_scope(name): # pylint: disable=not-callable
1248
+ return self._trace()
1249
+
1250
+ def _add_to_tensor(self, x):
1251
+ # Override if a more efficient implementation is available.
1252
+ return self.to_dense() + x
1253
+
1254
+ def add_to_tensor(self, x, name="add_to_tensor"):
1255
+ """Add matrix represented by this operator to `x`. Equivalent to `A + x`.
1256
+
1257
+ Args:
1258
+ x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
1259
+ name: A name to give this `Op`.
1260
+
1261
+ Returns:
1262
+ A `Tensor` with broadcast shape and same `dtype` as `self`.
1263
+ """
1264
+ with self._name_scope(name): # pylint: disable=not-callable
1265
+ x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
1266
+ self._check_input_dtype(x)
1267
+ return self._add_to_tensor(x)
1268
+
1269
+ def _eigvals(self):
1270
+ return linalg_ops.self_adjoint_eigvals(self.to_dense())
1271
+
1272
+ def eigvals(self, name="eigvals"):
1273
+ """Returns the eigenvalues of this linear operator.
1274
+
1275
+ If the operator is marked as self-adjoint (via `is_self_adjoint`)
1276
+ this computation can be more efficient.
1277
+
1278
+ Note: This currently only supports self-adjoint operators.
1279
+
1280
+ Args:
1281
+ name: A name for this `Op`.
1282
+
1283
+ Returns:
1284
+ Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.
1285
+ """
1286
+ if not self.is_self_adjoint:
1287
+ raise NotImplementedError("Only self-adjoint matrices are supported.")
1288
+ with self._name_scope(name): # pylint: disable=not-callable
1289
+ return self._eigvals()
1290
+
1291
+ def _cond(self):
1292
+ if not self.is_self_adjoint:
1293
+ # In general the condition number is the ratio of the
1294
+ # absolute value of the largest and smallest singular values.
1295
+ vals = linalg_ops.svd(self.to_dense(), compute_uv=False)
1296
+ else:
1297
+ # For self-adjoint matrices, and in general normal matrices,
1298
+ # we can use eigenvalues.
1299
+ vals = math_ops.abs(self._eigvals())
1300
+
1301
+ return (math_ops.reduce_max(vals, axis=-1) /
1302
+ math_ops.reduce_min(vals, axis=-1))
1303
+
1304
+ def cond(self, name="cond"):
1305
+ """Returns the condition number of this linear operator.
1306
+
1307
+ Args:
1308
+ name: A name for this `Op`.
1309
+
1310
+ Returns:
1311
+ Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
1312
+ """
1313
+ with self._name_scope(name): # pylint: disable=not-callable
1314
+ return self._cond()
1315
+
1316
+ def _can_use_cholesky(self):
1317
+ return self.is_self_adjoint and self.is_positive_definite
1318
+
1319
+ def _set_graph_parents(self, graph_parents):
1320
+ """Set self._graph_parents. Called during derived class init.
1321
+
1322
+ This method allows derived classes to set graph_parents, without triggering
1323
+ a deprecation warning (which is invoked if `graph_parents` is passed during
1324
+ `__init__`.
1325
+
1326
+ Args:
1327
+ graph_parents: Iterable over Tensors.
1328
+ """
1329
+ # TODO(b/143910018) Remove this function in V3.
1330
+ graph_parents = [] if graph_parents is None else graph_parents
1331
+ for i, t in enumerate(graph_parents):
1332
+ if t is None or not (linear_operator_util.is_ref(t) or
1333
+ tensor_util.is_tf_type(t)):
1334
+ raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
1335
+ self._graph_parents = graph_parents
1336
+
1337
+ @property
1338
+ def _composite_tensor_fields(self):
1339
+ """A tuple of parameter names to rebuild the `LinearOperator`.
1340
+
1341
+ The tuple contains the names of kwargs to the `LinearOperator`'s constructor
1342
+ that the `TypeSpec` needs to rebuild the `LinearOperator` instance.
1343
+
1344
+ "is_non_singular", "is_self_adjoint", "is_positive_definite", and
1345
+ "is_square" are common to all `LinearOperator` subclasses and may be
1346
+ omitted.
1347
+ """
1348
+ return ()
1349
+
1350
+ @property
1351
+ def _composite_tensor_prefer_static_fields(self):
1352
+ """A tuple of names referring to parameters that may be treated statically.
1353
+
1354
+ This is a subset of `_composite_tensor_fields`, and contains the names of
1355
+ of `Tensor`-like args to the `LinearOperator`s constructor that may be
1356
+ stored as static values, if they are statically known. These are typically
1357
+ shapes or axis values.
1358
+ """
1359
+ return ()
1360
+
1361
+ @property
1362
+ def _type_spec(self):
1363
+ # This property will be overwritten by the `@make_composite_tensor`
1364
+ # decorator. However, we need it so that a valid subclass of the `ABCMeta`
1365
+ # class `CompositeTensor` can be constructed and passed to the
1366
+ # `@make_composite_tensor` decorator.
1367
+ pass
1368
+
1369
+ def _convert_variables_to_tensors(self):
1370
+ """Recursively converts ResourceVariables in the LinearOperator to Tensors.
1371
+
1372
+ The usage of `self._type_spec._from_components` violates the contract of
1373
+ `CompositeTensor`, since it is called on a different nested structure
1374
+ (one containing only `Tensor`s) than `self.type_spec` specifies (one that
1375
+ may contain `ResourceVariable`s). Since `LinearOperator`'s
1376
+ `_from_components` method just passes the contents of the nested structure
1377
+ to `__init__` to rebuild the operator, and any `LinearOperator` that may be
1378
+ instantiated with `ResourceVariables` may also be instantiated with
1379
+ `Tensor`s, this usage is valid.
1380
+
1381
+ Returns:
1382
+ tensor_operator: `self` with all internal Variables converted to Tensors.
1383
+ """
1384
+ # pylint: disable=protected-access
1385
+ components = self._type_spec._to_components(self)
1386
+ tensor_components = variable_utils.convert_variables_to_tensors(
1387
+ components)
1388
+ return self._type_spec._from_components(tensor_components)
1389
+ # pylint: enable=protected-access
1390
+
1391
+ def __getitem__(self, slices):
1392
+ return slicing.batch_slice(self, params_overrides={}, slices=slices)
1393
+
1394
+ @property
1395
+ def _experimental_parameter_ndims_to_matrix_ndims(self):
1396
+ """A dict of names to number of dimensions contributing to an operator.
1397
+
1398
+ This is a dictionary of parameter names to `int`s specifying the
1399
+ number of right-most dimensions contributing to the **matrix** shape of the
1400
+ densified operator.
1401
+ If the parameter is a `Tensor`, this is mapped to an `int`.
1402
+ If the parameter is a `LinearOperator` (called `A`), this specifies the
1403
+ number of batch dimensions of `A` contributing to this `LinearOperator`s
1404
+ matrix shape.
1405
+ If the parameter is a structure, this is a structure of the same type of
1406
+ `int`s.
1407
+ """
1408
+ return ()
1409
+
1410
+ __composite_gradient__ = _LinearOperatorGradient()
1411
+
1412
+
1413
+ class _LinearOperatorSpec(type_spec.BatchableTypeSpec):
1414
+ """A tf.TypeSpec for `LinearOperator` objects."""
1415
+
1416
+ __slots__ = ("_param_specs", "_non_tensor_params", "_prefer_static_fields")
1417
+
1418
+ def __init__(self, param_specs, non_tensor_params, prefer_static_fields):
1419
+ """Initializes a new `_LinearOperatorSpec`.
1420
+
1421
+ Args:
1422
+ param_specs: Python `dict` of `tf.TypeSpec` instances that describe
1423
+ kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or
1424
+ `CompositeTensor` subclasses.
1425
+ non_tensor_params: Python `dict` containing non-`Tensor` and non-
1426
+ `CompositeTensor` kwargs to the `LinearOperator`'s constructor.
1427
+ prefer_static_fields: Python `tuple` of strings corresponding to the names
1428
+ of `Tensor`-like args to the `LinearOperator`s constructor that may be
1429
+ stored as static values, if known. These are typically shapes, indices,
1430
+ or axis values.
1431
+ """
1432
+ self._param_specs = param_specs
1433
+ self._non_tensor_params = non_tensor_params
1434
+ self._prefer_static_fields = prefer_static_fields
1435
+
1436
+ @classmethod
1437
+ def from_operator(cls, operator):
1438
+ """Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.
1439
+
1440
+ Args:
1441
+ operator: An instance of `LinearOperator`.
1442
+
1443
+ Returns:
1444
+ linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as
1445
+ the `TypeSpec` of `operator`.
1446
+ """
1447
+ validation_fields = ("is_non_singular", "is_self_adjoint",
1448
+ "is_positive_definite", "is_square")
1449
+ kwargs = _extract_attrs(
1450
+ operator,
1451
+ keys=set(operator._composite_tensor_fields + validation_fields)) # pylint: disable=protected-access
1452
+
1453
+ non_tensor_params = {}
1454
+ param_specs = {}
1455
+ for k, v in list(kwargs.items()):
1456
+ type_spec_or_v = _extract_type_spec_recursively(v)
1457
+ is_tensor = [isinstance(x, type_spec.TypeSpec)
1458
+ for x in nest.flatten(type_spec_or_v)]
1459
+ if all(is_tensor):
1460
+ param_specs[k] = type_spec_or_v
1461
+ elif not any(is_tensor):
1462
+ non_tensor_params[k] = v
1463
+ else:
1464
+ raise NotImplementedError(f"Field {k} contains a mix of `Tensor` and "
1465
+ f" non-`Tensor` values.")
1466
+
1467
+ return cls(
1468
+ param_specs=param_specs,
1469
+ non_tensor_params=non_tensor_params,
1470
+ prefer_static_fields=operator._composite_tensor_prefer_static_fields) # pylint: disable=protected-access
1471
+
1472
+ def _to_components(self, obj):
1473
+ return _extract_attrs(obj, keys=list(self._param_specs))
1474
+
1475
+ def _from_components(self, components):
1476
+ kwargs = dict(self._non_tensor_params, **components)
1477
+ return self.value_type(**kwargs)
1478
+
1479
+ @property
1480
+ def _component_specs(self):
1481
+ return self._param_specs
1482
+
1483
+ def _serialize(self):
1484
+ return (self._param_specs,
1485
+ self._non_tensor_params,
1486
+ self._prefer_static_fields)
1487
+
1488
+ def _copy(self, **overrides):
1489
+ kwargs = {
1490
+ "param_specs": self._param_specs,
1491
+ "non_tensor_params": self._non_tensor_params,
1492
+ "prefer_static_fields": self._prefer_static_fields
1493
+ }
1494
+ kwargs.update(overrides)
1495
+ return type(self)(**kwargs)
1496
+
1497
+ def _batch(self, batch_size):
1498
+ """Returns a TypeSpec representing a batch of objects with this TypeSpec."""
1499
+ return self._copy(
1500
+ param_specs=nest.map_structure(
1501
+ lambda spec: spec._batch(batch_size), # pylint: disable=protected-access
1502
+ self._param_specs))
1503
+
1504
+ def _unbatch(self, batch_size):
1505
+ """Returns a TypeSpec representing a single element of this TypeSpec."""
1506
+ return self._copy(
1507
+ param_specs=nest.map_structure(
1508
+ lambda spec: spec._unbatch(), # pylint: disable=protected-access
1509
+ self._param_specs))
1510
+
1511
+
1512
+ def make_composite_tensor(cls, module_name="tf.linalg"):
1513
+ """Class decorator to convert `LinearOperator`s to `CompositeTensor`."""
1514
+
1515
+ spec_name = "{}Spec".format(cls.__name__)
1516
+ spec_type = type(spec_name, (_LinearOperatorSpec,), {"value_type": cls})
1517
+ type_spec_registry.register("{}.{}".format(module_name, spec_name))(spec_type)
1518
+ cls._type_spec = property(spec_type.from_operator) # pylint: disable=protected-access
1519
+ return cls
1520
+
1521
+
1522
+ def _extract_attrs(op, keys):
1523
+ """Extract constructor kwargs to reconstruct `op`.
1524
+
1525
+ Args:
1526
+ op: A `LinearOperator` instance.
1527
+ keys: A Python `tuple` of strings indicating the names of the constructor
1528
+ kwargs to extract from `op`.
1529
+
1530
+ Returns:
1531
+ kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.
1532
+ """
1533
+
1534
+ kwargs = {}
1535
+ not_found = object()
1536
+ for k in keys:
1537
+ srcs = [
1538
+ getattr(op, k, not_found), getattr(op, "_" + k, not_found),
1539
+ getattr(op, "parameters", {}).get(k, not_found),
1540
+ ]
1541
+ if any(v is not not_found for v in srcs):
1542
+ kwargs[k] = [v for v in srcs if v is not not_found][0]
1543
+ else:
1544
+ raise ValueError(
1545
+ f"Could not determine an appropriate value for field `{k}` in object "
1546
+ f" `{op}`. Looked for \n"
1547
+ f" 1. an attr called `{k}`,\n"
1548
+ f" 2. an attr called `_{k}`,\n"
1549
+ f" 3. an entry in `op.parameters` with key '{k}'.")
1550
+ if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None: # pylint: disable=protected-access
1551
+ if tensor_util.is_tensor(kwargs[k]):
1552
+ static_val = tensor_util.constant_value(kwargs[k])
1553
+ if static_val is not None:
1554
+ kwargs[k] = static_val
1555
+ if isinstance(kwargs[k], (np.ndarray, np.generic)):
1556
+ kwargs[k] = kwargs[k].tolist()
1557
+ return kwargs
1558
+
1559
+
1560
+ def _extract_type_spec_recursively(value):
1561
+ """Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.
1562
+
1563
+ If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If
1564
+ `value` is a collection containing `Tensor` values, recursively supplant them
1565
+ with their respective `TypeSpec`s in a collection of parallel stucture.
1566
+
1567
+ If `value` is none of the above, return it unchanged.
1568
+
1569
+ Args:
1570
+ value: a Python `object` to (possibly) turn into a (collection of)
1571
+ `tf.TypeSpec`(s).
1572
+
1573
+ Returns:
1574
+ spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`
1575
+ or `value`, if no `Tensor`s are found.
1576
+ """
1577
+ if isinstance(value, composite_tensor.CompositeTensor):
1578
+ return value._type_spec # pylint: disable=protected-access
1579
+ if isinstance(value, variables.Variable):
1580
+ return resource_variable_ops.VariableSpec(
1581
+ value.shape, dtype=value.dtype, trainable=value.trainable)
1582
+ if tensor_util.is_tensor(value):
1583
+ return tensor_spec.TensorSpec(value.shape, value.dtype)
1584
+ # Unwrap trackable data structures to comply with `Type_Spec._serialize`
1585
+ # requirements. `ListWrapper`s are converted to `list`s, and for other
1586
+ # trackable data structures, the `__wrapped__` attribute is used.
1587
+ if isinstance(value, list):
1588
+ return list(_extract_type_spec_recursively(v) for v in value)
1589
+ if isinstance(value, data_structures.TrackableDataStructure):
1590
+ return _extract_type_spec_recursively(value.__wrapped__)
1591
+ if isinstance(value, tuple):
1592
+ return type(value)(_extract_type_spec_recursively(x) for x in value)
1593
+ if isinstance(value, dict):
1594
+ return type(value)((k, _extract_type_spec_recursively(v))
1595
+ for k, v in value.items())
1596
+ return value
1597
+
1598
+
1599
+ # Overrides for tf.linalg functions. This allows a LinearOperator to be used in
1600
+ # place of a Tensor.
1601
+ # For instance tf.trace(linop) and linop.trace() both work.
1602
+
1603
+
1604
+ @dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)
1605
+ def _adjoint(matrix, name=None):
1606
+ return matrix.adjoint(name)
1607
+
1608
+
1609
+ @dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)
1610
+ def _cholesky(input, name=None): # pylint:disable=redefined-builtin
1611
+ return input.cholesky(name)
1612
+
1613
+
1614
+ # The signature has to match with the one in python/op/array_ops.py,
1615
+ # so we have k, padding_value, and align even though we don't use them here.
1616
+ # pylint:disable=unused-argument
1617
+ @dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)
1618
+ def _diag_part(
1619
+ input, # pylint:disable=redefined-builtin
1620
+ name="diag_part",
1621
+ k=0,
1622
+ padding_value=0,
1623
+ align="RIGHT_LEFT"):
1624
+ return input.diag_part(name)
1625
+ # pylint:enable=unused-argument
1626
+
1627
+
1628
+ @dispatch.dispatch_for_types(linalg.det, LinearOperator)
1629
+ def _det(input, name=None): # pylint:disable=redefined-builtin
1630
+ return input.determinant(name)
1631
+
1632
+
1633
+ @dispatch.dispatch_for_types(linalg.inv, LinearOperator)
1634
+ def _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin
1635
+ inv = input.inverse(name)
1636
+ if adjoint:
1637
+ inv = inv.adjoint()
1638
+ return inv
1639
+
1640
+
1641
+ @dispatch.dispatch_for_types(linalg.logdet, LinearOperator)
1642
+ def _logdet(matrix, name=None):
1643
+ if matrix.is_positive_definite and matrix.is_self_adjoint:
1644
+ return matrix.log_abs_determinant(name)
1645
+ raise ValueError("Expected matrix to be self-adjoint positive definite.")
1646
+
1647
+
1648
+ @dispatch.dispatch_for_types(math_ops.matmul, LinearOperator)
1649
+ def _matmul( # pylint:disable=missing-docstring
1650
+ a,
1651
+ b,
1652
+ transpose_a=False,
1653
+ transpose_b=False,
1654
+ adjoint_a=False,
1655
+ adjoint_b=False,
1656
+ a_is_sparse=False,
1657
+ b_is_sparse=False,
1658
+ output_type=None, # pylint: disable=unused-argument
1659
+ grad_a=False, # pylint: disable=unused-argument
1660
+ grad_b=False, # pylint: disable=unused-argument
1661
+ name=None,
1662
+ ):
1663
+ if transpose_a or transpose_b:
1664
+ raise ValueError("Transposing not supported at this time.")
1665
+ if a_is_sparse or b_is_sparse:
1666
+ raise ValueError("Sparse methods not supported at this time.")
1667
+ if not isinstance(a, LinearOperator):
1668
+ # We use the identity (B^HA^H)^H = AB
1669
+ adjoint_matmul = b.matmul(
1670
+ a,
1671
+ adjoint=(not adjoint_b),
1672
+ adjoint_arg=(not adjoint_a),
1673
+ name=name)
1674
+ return linalg.adjoint(adjoint_matmul)
1675
+ return a.matmul(
1676
+ b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)
1677
+
1678
+
1679
+ @dispatch.dispatch_for_types(linalg.solve, LinearOperator)
1680
+ def _solve(
1681
+ matrix,
1682
+ rhs,
1683
+ adjoint=False,
1684
+ name=None):
1685
+ if not isinstance(matrix, LinearOperator):
1686
+ raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a "
1687
+ "LinearOperator is not supported.")
1688
+ return matrix.solve(rhs, adjoint=adjoint, name=name)
1689
+
1690
+
1691
+ @dispatch.dispatch_for_types(linalg.trace, LinearOperator)
1692
+ def _trace(x, name=None):
1693
+ return x.trace(name)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_addition.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Add one or more `LinearOperators` efficiently."""
16
+
17
+ import abc
18
+
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_shape
21
+ from tensorflow.python.ops import array_ops
22
+ from tensorflow.python.ops import check_ops
23
+ from tensorflow.python.ops.linalg import linear_operator
24
+ from tensorflow.python.ops.linalg import linear_operator_diag
25
+ from tensorflow.python.ops.linalg import linear_operator_full_matrix
26
+ from tensorflow.python.ops.linalg import linear_operator_identity
27
+ from tensorflow.python.ops.linalg import linear_operator_lower_triangular
28
+
29
+ __all__ = []
30
+
31
+
32
+ def add_operators(operators,
33
+ operator_name=None,
34
+ addition_tiers=None,
35
+ name=None):
36
+ """Efficiently add one or more linear operators.
37
+
38
+ Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of
39
+ operators `[B1, B2,...]` such that
40
+
41
+ ```sum_k Ak.matmul(x) = sum_k Bk.matmul(x).```
42
+
43
+ The operators `Bk` result by adding some of the `Ak`, as allowed by
44
+ `addition_tiers`.
45
+
46
+ Example of efficient adding of diagonal operators.
47
+
48
+ ```python
49
+ A1 = LinearOperatorDiag(diag=[1., 1.], name="A1")
50
+ A2 = LinearOperatorDiag(diag=[2., 2.], name="A2")
51
+
52
+ # Use two tiers, the first contains an Adder that returns Diag. Since both
53
+ # A1 and A2 are Diag, they can use this Adder. The second tier will not be
54
+ # used.
55
+ addition_tiers = [
56
+ [_AddAndReturnDiag()],
57
+ [_AddAndReturnMatrix()]]
58
+ B_list = add_operators([A1, A2], addition_tiers=addition_tiers)
59
+
60
+ len(B_list)
61
+ ==> 1
62
+
63
+ B_list[0].__class__.__name__
64
+ ==> 'LinearOperatorDiag'
65
+
66
+ B_list[0].to_dense()
67
+ ==> [[3., 0.],
68
+ [0., 3.]]
69
+
70
+ B_list[0].name
71
+ ==> 'Add/A1__A2/'
72
+ ```
73
+
74
+ Args:
75
+ operators: Iterable of `LinearOperator` objects with same `dtype`, domain
76
+ and range dimensions, and broadcastable batch shapes.
77
+ operator_name: String name for returned `LinearOperator`. Defaults to
78
+ concatenation of "Add/A__B/" that indicates the order of addition steps.
79
+ addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i`
80
+ is a list of `Adder` objects. This function attempts to do all additions
81
+ in tier `i` before trying tier `i + 1`.
82
+ name: A name for this `Op`. Defaults to `add_operators`.
83
+
84
+ Returns:
85
+ Subclass of `LinearOperator`. Class and order of addition may change as new
86
+ (and better) addition strategies emerge.
87
+
88
+ Raises:
89
+ ValueError: If `operators` argument is empty.
90
+ ValueError: If shapes are incompatible.
91
+ """
92
+ # Default setting
93
+ if addition_tiers is None:
94
+ addition_tiers = _DEFAULT_ADDITION_TIERS
95
+
96
+ # Argument checking.
97
+ check_ops.assert_proper_iterable(operators)
98
+ operators = list(reversed(operators))
99
+ if len(operators) < 1:
100
+ raise ValueError(
101
+ f"Argument `operators` must contain at least one operator. "
102
+ f"Received: {operators}.")
103
+ if not all(
104
+ isinstance(op, linear_operator.LinearOperator) for op in operators):
105
+ raise TypeError(
106
+ f"Argument `operators` must contain only LinearOperator instances. "
107
+ f"Received: {operators}.")
108
+ _static_check_for_same_dimensions(operators)
109
+ _static_check_for_broadcastable_batch_shape(operators)
110
+
111
+ with ops.name_scope(name or "add_operators"):
112
+
113
+ # Additions done in one of the tiers. Try tier 0, 1,...
114
+ ops_to_try_at_next_tier = list(operators)
115
+ for tier in addition_tiers:
116
+ ops_to_try_at_this_tier = ops_to_try_at_next_tier
117
+ ops_to_try_at_next_tier = []
118
+ while ops_to_try_at_this_tier:
119
+ op1 = ops_to_try_at_this_tier.pop()
120
+ op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier)
121
+ if op2 is not None:
122
+ # Will try to add the result of this again at this same tier.
123
+ new_operator = adder.add(op1, op2, operator_name)
124
+ ops_to_try_at_this_tier.append(new_operator)
125
+ else:
126
+ ops_to_try_at_next_tier.append(op1)
127
+
128
+ return ops_to_try_at_next_tier
129
+
130
+
131
+ def _pop_a_match_at_tier(op1, operator_list, tier):
132
+ # Search from the back of list to the front in order to create nice default
133
+ # order of operations.
134
+ for i in range(1, len(operator_list) + 1):
135
+ op2 = operator_list[-i]
136
+ for adder in tier:
137
+ if adder.can_add(op1, op2):
138
+ return operator_list.pop(-i), adder
139
+ return None, None
140
+
141
+
142
+ def _infer_hints_allowing_override(op1, op2, hints):
143
+ """Infer hints from op1 and op2. hints argument is an override.
144
+
145
+ Args:
146
+ op1: LinearOperator
147
+ op2: LinearOperator
148
+ hints: _Hints object holding "is_X" boolean hints to use for returned
149
+ operator.
150
+ If some hint is None, try to set using op1 and op2. If the
151
+ hint is provided, ignore op1 and op2 hints. This allows an override
152
+ of previous hints, but does not allow forbidden hints (e.g. you still
153
+ cannot say a real diagonal operator is not self-adjoint.
154
+
155
+ Returns:
156
+ _Hints object.
157
+ """
158
+ hints = hints or _Hints()
159
+ # If A, B are self-adjoint, then so is A + B.
160
+ if hints.is_self_adjoint is None:
161
+ is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint
162
+ else:
163
+ is_self_adjoint = hints.is_self_adjoint
164
+
165
+ # If A, B are positive definite, then so is A + B.
166
+ if hints.is_positive_definite is None:
167
+ is_positive_definite = op1.is_positive_definite and op2.is_positive_definite
168
+ else:
169
+ is_positive_definite = hints.is_positive_definite
170
+
171
+ # A positive definite operator is always non-singular.
172
+ if is_positive_definite and hints.is_positive_definite is None:
173
+ is_non_singular = True
174
+ else:
175
+ is_non_singular = hints.is_non_singular
176
+
177
+ return _Hints(
178
+ is_non_singular=is_non_singular,
179
+ is_self_adjoint=is_self_adjoint,
180
+ is_positive_definite=is_positive_definite)
181
+
182
+
183
+ def _static_check_for_same_dimensions(operators):
184
+ """ValueError if operators determined to have different dimensions."""
185
+ if len(operators) < 2:
186
+ return
187
+
188
+ domain_dimensions = [
189
+ (op.name, tensor_shape.dimension_value(op.domain_dimension))
190
+ for op in operators
191
+ if tensor_shape.dimension_value(op.domain_dimension) is not None]
192
+ if len(set(value for name, value in domain_dimensions)) > 1:
193
+ raise ValueError(f"All `operators` must have the same `domain_dimension`. "
194
+ f"Received: {domain_dimensions}.")
195
+
196
+ range_dimensions = [
197
+ (op.name, tensor_shape.dimension_value(op.range_dimension))
198
+ for op in operators
199
+ if tensor_shape.dimension_value(op.range_dimension) is not None]
200
+ if len(set(value for name, value in range_dimensions)) > 1:
201
+ raise ValueError(f"All operators must have the same `range_dimension`. "
202
+ f"Received: {range_dimensions}.")
203
+
204
+
205
+ def _static_check_for_broadcastable_batch_shape(operators):
206
+ """ValueError if operators determined to have non-broadcastable shapes."""
207
+ if len(operators) < 2:
208
+ return
209
+
210
+ # This will fail if they cannot be broadcast together.
211
+ batch_shape = operators[0].batch_shape
212
+ for op in operators[1:]:
213
+ batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)
214
+
215
+
216
+ class _Hints:
217
+ """Holds 'is_X' flags that every LinearOperator is initialized with."""
218
+
219
+ def __init__(self,
220
+ is_non_singular=None,
221
+ is_positive_definite=None,
222
+ is_self_adjoint=None):
223
+ self.is_non_singular = is_non_singular
224
+ self.is_positive_definite = is_positive_definite
225
+ self.is_self_adjoint = is_self_adjoint
226
+
227
+
228
+ ################################################################################
229
+ # Classes to add two linear operators.
230
+ ################################################################################
231
+
232
+
233
class _Adder(metaclass=abc.ABCMeta):
  """Abstract base class to add two operators.

  Each `Adder` acts independently, adding everything it can, paying no attention
  as to whether another `Adder` could have done the addition more efficiently.
  """

  @property
  def name(self):
    # The concrete subclass name, e.g. "_AddAndReturnDiag"; used below to
    # derive the TF name scope.
    return self.__class__.__name__

  @abc.abstractmethod
  def can_add(self, op1, op2):
    """Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`."""
    pass

  @abc.abstractmethod
  def _add(self, op1, op2, operator_name, hints):
    # Derived classes can assume op1 and op2 have been validated, e.g. they have
    # the same dtype, and their domain/range dimensions match.
    pass

  def add(self, op1, op2, operator_name, hints=None):
    """Return new `LinearOperator` acting like `op1 + op2`.

    Args:
      op1: `LinearOperator`
      op2: `LinearOperator`, with `shape` and `dtype` such that adding to
        `op1` is allowed.
      operator_name: `String` name to give to returned `LinearOperator`
      hints: `_Hints` object.  Returned `LinearOperator` will be created with
        these hints.

    Returns:
      `LinearOperator`
    """
    updated_hints = _infer_hints_allowing_override(op1, op2, hints)

    if operator_name is None:
      operator_name = "Add/" + op1.name + "__" + op2.name + "/"

    # Strip a leading underscore from the class name so the name scope is
    # clean, e.g. "_AddAndReturnDiag" -> "AddAndReturnDiag".
    scope_name = self.name
    if scope_name.startswith("_"):
      scope_name = scope_name[1:]
    with ops.name_scope(scope_name):
      return self._add(op1, op2, operator_name, updated_hints)
279
+
280
+
281
class _AddAndReturnScaledIdentity(_Adder):
  """Handles additions resulting in an Identity family member.

  The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family
  is closed under addition.  This `Adder` respects that, and returns an Identity
  family member.
  """

  def can_add(self, op1, op2):
    # Addable iff both operators belong to the identity family.
    types = {_type(op1), _type(op2)}
    return not types.difference(_IDENTITY_FAMILY)

  def _add(self, op1, op2, operator_name, hints):
    # Will build a LinearOperatorScaledIdentity.

    # A plain identity contributes multiplier 1 (shaped like its batch shape);
    # a scaled identity contributes its own multiplier.
    if _type(op1) == _SCALED_IDENTITY:
      multiplier_1 = op1.multiplier
    else:
      multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)

    if _type(op2) == _SCALED_IDENTITY:
      multiplier_2 = op2.multiplier
    else:
      multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)

    # The sum is itself a scaled identity with the multipliers added.
    return linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=op1.range_dimension_tensor(),
        multiplier=multiplier_1 + multiplier_2,
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
312
+
313
+
314
class _AddAndReturnDiag(_Adder):
  """Handles additions resulting in a Diag operator."""

  def can_add(self, op1, op2):
    # Addable iff both operators are diagonal-like (diag or identity family).
    types = {_type(op1), _type(op2)}
    return not types.difference(_DIAG_LIKE)

  def _add(self, op1, op2, operator_name, hints):
    # The sum of two diagonal-like operators is the diagonal operator whose
    # diagonal is the sum of the two diagonal parts.
    return linear_operator_diag.LinearOperatorDiag(
        diag=op1.diag_part() + op2.diag_part(),
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
328
+
329
+
330
class _AddAndReturnTriL(_Adder):
  """Handles additions resulting in a TriL operator."""

  def can_add(self, op1, op2):
    # Addable iff both operators are lower-triangular or diagonal-like
    # (a diagonal is a special case of lower-triangular).
    types = {_type(op1), _type(op2)}
    return not types.difference(_DIAG_LIKE.union({_TRIL}))

  def _add(self, op1, op2, operator_name, hints):
    # Prefer calling `add_to_tensor` on the operand with an efficient
    # implementation (diag-like); otherwise fall back to op2's.
    if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
      op_add_to_tensor, op_other = op1, op2
    else:
      op_add_to_tensor, op_other = op2, op1

    return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
        tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
349
+
350
+
351
class _AddAndReturnMatrix(_Adder):
  """Handles additions resulting in a `LinearOperatorFullMatrix`."""
  # NOTE: The original docstring opened with four quotes (`""""Handles...`),
  # leaving a stray leading `"` in the rendered docstring; fixed above.

  def can_add(self, op1, op2):  # pylint: disable=unused-argument
    # Last-resort adder: any pair of LinearOperators can be added densely.
    return isinstance(op1, linear_operator.LinearOperator) and isinstance(
        op2, linear_operator.LinearOperator)

  def _add(self, op1, op2, operator_name, hints):
    # Prefer calling `add_to_tensor` on the operand with an efficient
    # implementation (diag-like); densify the other operand.
    if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
      op_add_to_tensor, op_other = op1, op2
    else:
      op_add_to_tensor, op_other = op2, op1
    return linear_operator_full_matrix.LinearOperatorFullMatrix(
        matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
369
+
370
+
371
################################################################################
# Constants designating types of LinearOperators
################################################################################

# Type name constants for LinearOperator classes.
# These strings are what `_type()` (below) returns for each supported class.
_IDENTITY = "identity"
_SCALED_IDENTITY = "scaled_identity"
_DIAG = "diag"
_TRIL = "tril"
_MATRIX = "matrix"

# Groups of operators.
_DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY}
_IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY}
# operators with an efficient .add_to_tensor() method.
_EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE

# Supported LinearOperator classes.
# Public list of the concrete classes the Adders in this module handle.
SUPPORTED_OPERATORS = [
    linear_operator_diag.LinearOperatorDiag,
    linear_operator_lower_triangular.LinearOperatorLowerTriangular,
    linear_operator_full_matrix.LinearOperatorFullMatrix,
    linear_operator_identity.LinearOperatorIdentity,
    linear_operator_identity.LinearOperatorScaledIdentity
]
396
+
397
+
398
def _type(operator):
  """Returns the type name constant (e.g. _TRIL) for operator.

  Args:
    operator: A `LinearOperator` instance of one of the classes listed in
      `SUPPORTED_OPERATORS`.

  Returns:
    One of the string constants `_DIAG`, `_TRIL`, `_MATRIX`, `_IDENTITY`,
    `_SCALED_IDENTITY`.

  Raises:
    TypeError: If `operator` is not an instance of a supported class.
  """
  if isinstance(operator, linear_operator_diag.LinearOperatorDiag):
    return _DIAG
  if isinstance(operator,
                linear_operator_lower_triangular.LinearOperatorLowerTriangular):
    return _TRIL
  if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):
    return _MATRIX
  if isinstance(operator, linear_operator_identity.LinearOperatorIdentity):
    return _IDENTITY
  if isinstance(operator,
                linear_operator_identity.LinearOperatorScaledIdentity):
    return _SCALED_IDENTITY
  raise TypeError(f"Expected operator to be one of [LinearOperatorDiag, "
                  f"LinearOperatorLowerTriangular, LinearOperatorFullMatrix, "
                  f"LinearOperatorIdentity, LinearOperatorScaledIdentity]. "
                  f"Received: {operator}")
416
+
417
+
418
################################################################################
# Addition tiers:
# We attempt to use Adders in tier K before K+1.
#
# Organize tiers to
#   (i) reduce O(..) complexity of forming final operator, and
#   (ii) produce the "most efficient" final operator.
# Dev notes:
# * Results of addition at tier K will be added at tier K or higher.
# * Tiers may change, and we warn the user that it may change.
################################################################################

# Note that the final tier, _AddAndReturnMatrix, will convert everything to a
# dense matrix.  So it is sometimes very inefficient.
# Tiers are ordered from most structure-preserving (identity family) to least
# (dense matrix).  Each tier is itself a list so several Adders can share a
# tier.
_DEFAULT_ADDITION_TIERS = [
    [_AddAndReturnScaledIdentity()],
    [_AddAndReturnDiag()],
    [_AddAndReturnTriL()],
    [_AddAndReturnMatrix()],
]
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_adjoint.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Takes the adjoint of a `LinearOperator`."""
16
+
17
+ from tensorflow.python.framework import ops
18
+ from tensorflow.python.ops import array_ops
19
+ from tensorflow.python.ops import math_ops
20
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
21
+ from tensorflow.python.ops.linalg import linear_operator
22
+ from tensorflow.python.ops.linalg import linear_operator_util
23
+ from tensorflow.python.util.tf_export import tf_export
24
+
25
+ __all__ = ["LinearOperatorAdjoint"]
26
+
27
+
28
@tf_export("linalg.LinearOperatorAdjoint")
@linear_operator.make_composite_tensor
class LinearOperatorAdjoint(linear_operator.LinearOperator):
  """`LinearOperator` representing the adjoint of another operator.

  This operator represents the adjoint of another operator.

  ```python
  # Create a 2 x 2 linear operator.
  operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]])
  operator_adjoint = LinearOperatorAdjoint(operator)

  operator_adjoint.to_dense()
  ==> [[1. + i, 0.]
       [3., 1 - i]]

  operator_adjoint.shape
  ==> [2, 2]

  operator_adjoint.log_abs_determinant()
  ==> - log(2)

  x = ... Shape [2, 4] Tensor
  operator_adjoint.matmul(x)
  ==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True)
  ```

  #### Performance

  The performance of `LinearOperatorAdjoint` depends on the underlying
  operators performance.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               operator,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None):
    r"""Initialize a `LinearOperatorAdjoint`.

    `LinearOperatorAdjoint` is initialized with an operator `A`.  The `solve`
    and `matmul` methods effectively flip the `adjoint` argument.  E.g.

    ```
    A = MyLinearOperator(...)
    B = LinearOperatorAdjoint(A)
    x = [....]  # a vector

    assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False)
    ```

    Args:
      operator: `LinearOperator` object.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`. Default is `operator.name +
        "_adjoint"`.

    Raises:
      ValueError:  If `operator.is_non_singular` is False.
    """
    # Record constructor arguments; used by the CompositeTensor machinery.
    parameters = dict(
        operator=operator,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name,
    )

    self._operator = operator

    # The congruency of is_non_singular and is_self_adjoint was checked in the
    # base operator.
    # Each `is_X` hint holds for A iff it holds for A^H, so the caller's hint
    # must agree with (or be derivable from) the base operator's hint.
    combine_hint = (
        linear_operator_util.use_operator_or_provided_hint_unless_contradicting)

    is_square = combine_hint(
        operator, "is_square", is_square,
        "An operator is square if and only if its adjoint is square.")

    is_non_singular = combine_hint(
        operator, "is_non_singular", is_non_singular,
        "An operator is non-singular if and only if its adjoint is "
        "non-singular.")

    is_self_adjoint = combine_hint(
        operator, "is_self_adjoint", is_self_adjoint,
        "An operator is self-adjoint if and only if its adjoint is "
        "self-adjoint.")

    is_positive_definite = combine_hint(
        operator, "is_positive_definite", is_positive_definite,
        "An operator is positive-definite if and only if its adjoint is "
        "positive-definite.")

    # Initialization.
    if name is None:
      name = operator.name + "_adjoint"
    with ops.name_scope(name):
      super(LinearOperatorAdjoint, self).__init__(
          dtype=operator.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  @property
  def operator(self):
    """The operator before taking the adjoint."""
    return self._operator

  def _linop_adjoint(self) -> linear_operator.LinearOperator:
    # The adjoint of the adjoint is the original operator.
    return self.operator

  def _assert_non_singular(self):
    # Delegate: A is non-singular iff A^H is.
    return self.operator.assert_non_singular()

  def _assert_positive_definite(self):
    return self.operator.assert_positive_definite()

  def _assert_self_adjoint(self):
    return self.operator.assert_self_adjoint()

  def _shape(self):
    # Rotate last dimension
    # (a [..., M, N] operator's adjoint is [..., N, M]).
    shape = self.operator.shape
    return shape[:-2].concatenate([shape[-1], shape[-2]])

  def _shape_tensor(self):
    # Rotate last dimension
    shape = self.operator.shape_tensor()
    return array_ops.concat([
        shape[:-2], [shape[-1], shape[-2]]], axis=-1)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # A^H @ x == A.matmul(x, adjoint=True); flipping `adjoint` delegates all
    # combinations to the base operator.
    return self.operator.matmul(
        x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)

  def _matvec(self, x, adjoint=False):
    return self.operator.matvec(x, adjoint=(not adjoint))

  def _determinant(self):
    # det(A^H) = conj(det(A)); conjugation is skipped when self-adjoint.
    if self.is_self_adjoint:
      return self.operator.determinant()
    return math_ops.conj(self.operator.determinant())

  def _log_abs_determinant(self):
    # |det(A^H)| = |det(A)|, so no conjugation is needed.
    return self.operator.log_abs_determinant()

  def _trace(self):
    # trace(A^H) = conj(trace(A)).
    if self.is_self_adjoint:
      return self.operator.trace()
    return math_ops.conj(self.operator.trace())

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # Solving with A^H is solving with A with the `adjoint` flag flipped.
    return self.operator.solve(
        rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)

  def _solvevec(self, rhs, adjoint=False):
    return self.operator.solvevec(rhs, adjoint=(not adjoint))

  def _to_dense(self):
    if self.is_self_adjoint:
      return self.operator.to_dense()
    return linalg.adjoint(self.operator.to_dense())

  def _add_to_tensor(self, x):
    return self.to_dense() + x

  def _eigvals(self):
    # Eigenvalues of A^H are the complex conjugates of those of A.
    eigvals = self.operator.eigvals()
    if not self.operator.is_self_adjoint:
      eigvals = math_ops.conj(eigvals)
    return eigvals

  def _cond(self):
    # The condition number is unchanged by taking the adjoint.
    return self.operator.cond()

  @property
  def _composite_tensor_fields(self):
    # Fields flattened/rebuilt by the CompositeTensor machinery.
    return ("operator",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    return {"operator": 0}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_diag.py ADDED
@@ -0,0 +1,818 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Create a Block Diagonal operator from one or more `LinearOperators`."""
16
+
17
+ from tensorflow.python.framework import common_shapes
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_conversion
21
+ from tensorflow.python.framework import tensor_shape
22
+ from tensorflow.python.ops import array_ops
23
+ from tensorflow.python.ops import array_ops_stack
24
+ from tensorflow.python.ops import check_ops
25
+ from tensorflow.python.ops import control_flow_ops
26
+ from tensorflow.python.ops.linalg import linear_operator
27
+ from tensorflow.python.ops.linalg import linear_operator_util
28
+ from tensorflow.python.ops.linalg import property_hint_util
29
+ from tensorflow.python.util.tf_export import tf_export
30
+
31
+ __all__ = ["LinearOperatorBlockDiag"]
32
+
33
+
34
+ @tf_export("linalg.LinearOperatorBlockDiag")
35
+ @linear_operator.make_composite_tensor
36
+ class LinearOperatorBlockDiag(linear_operator.LinearOperator):
37
+ """Combines one or more `LinearOperators` in to a Block Diagonal matrix.
38
+
39
+ This operator combines one or more linear operators `[op1,...,opJ]`,
40
+ building a new `LinearOperator`, whose underlying matrix representation
41
+ has each operator `opi` on the main diagonal, and zero's elsewhere.
42
+
43
+ #### Shape compatibility
44
+
45
+ If `opj` acts like a [batch] matrix `Aj`, then `op_combined` acts like
46
+ the [batch] matrix formed by having each matrix `Aj` on the main
47
+ diagonal.
48
+
49
+ Each `opj` is required to represent a matrix, and hence will have
50
+ shape `batch_shape_j + [M_j, N_j]`.
51
+
52
+ If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the combined operator
53
+ has shape `broadcast_batch_shape + [sum M_j, sum N_j]`, where
54
+ `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
55
+ `j = 1,...,J`, assuming the intermediate batch shapes broadcast.
56
+
57
+ Arguments to `matmul`, `matvec`, `solve`, and `solvevec` may either be single
58
+ `Tensor`s or lists of `Tensor`s that are interpreted as blocks. The `j`th
59
+ element of a blockwise list of `Tensor`s must have dimensions that match
60
+ `opj` for the given method. If a list of blocks is input, then a list of
61
+ blocks is returned as well.
62
+
63
+ When the `opj` are not guaranteed to be square, this operator's methods might
64
+ fail due to the combined operator not being square and/or lack of efficient
65
+ methods.
66
+
67
+ ```python
68
+ # Create a 4 x 4 linear operator combined of two 2 x 2 operators.
69
+ operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
70
+ operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
71
+ operator = LinearOperatorBlockDiag([operator_1, operator_2])
72
+
73
+ operator.to_dense()
74
+ ==> [[1., 2., 0., 0.],
75
+ [3., 4., 0., 0.],
76
+ [0., 0., 1., 0.],
77
+ [0., 0., 0., 1.]]
78
+
79
+ operator.shape
80
+ ==> [4, 4]
81
+
82
+ operator.log_abs_determinant()
83
+ ==> scalar Tensor
84
+
85
+ x1 = ... # Shape [2, 2] Tensor
86
+ x2 = ... # Shape [2, 2] Tensor
87
+ x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor
88
+ operator.matmul(x)
89
+ ==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)])
90
+
91
+ # Create a 5 x 4 linear operator combining three blocks.
92
+ operator_1 = LinearOperatorFullMatrix([[1.], [3.]])
93
+ operator_2 = LinearOperatorFullMatrix([[1., 6.]])
94
+ operator_3 = LinearOperatorFullMatrix([[2.], [7.]])
95
+ operator = LinearOperatorBlockDiag([operator_1, operator_2, operator_3])
96
+
97
+ operator.to_dense()
98
+ ==> [[1., 0., 0., 0.],
99
+ [3., 0., 0., 0.],
100
+ [0., 1., 6., 0.],
101
+ [0., 0., 0., 2.]]
102
+ [0., 0., 0., 7.]]
103
+
104
+ operator.shape
105
+ ==> [5, 4]
106
+
107
+
108
+ # Create a [2, 3] batch of 4 x 4 linear operators.
109
+ matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
110
+ operator_44 = LinearOperatorFullMatrix(matrix)
111
+
112
+ # Create a [1, 3] batch of 5 x 5 linear operators.
113
+ matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
114
+ operator_55 = LinearOperatorFullMatrix(matrix_55)
115
+
116
+ # Combine to create a [2, 3] batch of 9 x 9 operators.
117
+ operator_99 = LinearOperatorBlockDiag([operator_44, operator_55])
118
+
119
+ # Create a shape [2, 3, 9] vector.
120
+ x = tf.random.normal(shape=[2, 3, 9])
121
+ operator_99.matmul(x)
122
+ ==> Shape [2, 3, 9] Tensor
123
+
124
+ # Create a blockwise list of vectors.
125
+ x = [tf.random.normal(shape=[2, 3, 4]), tf.random.normal(shape=[2, 3, 5])]
126
+ operator_99.matmul(x)
127
+ ==> [Shape [2, 3, 4] Tensor, Shape [2, 3, 5] Tensor]
128
+ ```
129
+
130
+ #### Performance
131
+
132
+ The performance of `LinearOperatorBlockDiag` on any operation is equal to
133
+ the sum of the individual operators' operations.
134
+
135
+
136
+ #### Matrix property hints
137
+
138
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
139
+ for `X = non_singular, self_adjoint, positive_definite, square`.
140
+ These have the following meaning:
141
+
142
+ * If `is_X == True`, callers should expect the operator to have the
143
+ property `X`. This is a promise that should be fulfilled, but is *not* a
144
+ runtime assert. For example, finite floating point precision may result
145
+ in these promises being violated.
146
+ * If `is_X == False`, callers should expect the operator to not have `X`.
147
+ * If `is_X == None` (the default), callers should have no expectation either
148
+ way.
149
+ """
150
+
151
def __init__(self,
             operators,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             is_square=True,
             name=None):
  r"""Initialize a `LinearOperatorBlockDiag`.

  `LinearOperatorBlockDiag` is initialized with a list of operators
  `[op_1,...,op_J]`.

  Args:
    operators:  Iterable of `LinearOperator` objects, each with
      the same `dtype` and composable shape.
    is_non_singular:  Expect that this operator is non-singular.
    is_self_adjoint:  Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite:  Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`.  Note that we do not require the operator to be
      self-adjoint to be positive-definite.  See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square:  Expect that this operator acts like square [batch] matrices.
      This is true by default, and will raise a `ValueError` otherwise.
    name: A name for this `LinearOperator`.  Default is the individual
      operators names joined with `_o_`.

  Raises:
    TypeError:  If all operators do not have the same `dtype`.
    ValueError:  If `operators` is empty or are non-square.
  """
  # Record constructor arguments; used by the CompositeTensor machinery.
  parameters = dict(
      operators=operators,
      is_non_singular=is_non_singular,
      is_self_adjoint=is_self_adjoint,
      is_positive_definite=is_positive_definite,
      is_square=is_square,
      name=name
  )

  # Validate operators.
  # Materialize as a list so it can be traversed multiple times below.
  check_ops.assert_proper_iterable(operators)
  operators = list(operators)
  if not operators:
    raise ValueError(
        "Expected a non-empty list of operators. Found: %s" % operators)
  self._operators = operators

  # Define diagonal operators, for functions that are shared across blockwise
  # `LinearOperator` types.
  self._diagonal_operators = operators

  # Validate dtype.
  # All blocks must share a single dtype; build a readable error listing
  # each operator's (name, dtype) otherwise.
  dtype = operators[0].dtype
  for operator in operators:
    if operator.dtype != dtype:
      name_type = (str((o.name, o.dtype)) for o in operators)
      raise TypeError(
          "Expected all operators to have the same dtype.  Found %s"
          % "   ".join(name_type))

  # Auto-set and check hints.
  # `all(...)` infers a property only when every block asserts it (a hint of
  # None, i.e. "unknown", blocks the inference); an explicit False from the
  # caller contradicting the inference is an error.
  if all(operator.is_non_singular for operator in operators):
    if is_non_singular is False:
      raise ValueError(
          "The direct sum of non-singular operators is always non-singular.")
    is_non_singular = True

  if all(operator.is_self_adjoint for operator in operators):
    if is_self_adjoint is False:
      raise ValueError(
          "The direct sum of self-adjoint operators is always self-adjoint.")
    is_self_adjoint = True

  if all(operator.is_positive_definite for operator in operators):
    if is_positive_definite is False:
      raise ValueError(
          "The direct sum of positive definite operators is always "
          "positive definite.")
    is_positive_definite = True

  if name is None:
    # Using ds to mean direct sum.
    name = "_ds_".join(operator.name for operator in operators)
  with ops.name_scope(name):
    super(LinearOperatorBlockDiag, self).__init__(
        dtype=dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        parameters=parameters,
        name=name)
245
+
246
@property
def operators(self):
  """The list of diagonal-block `LinearOperator`s, as passed to `__init__`."""
  return self._operators
249
+
250
def _block_range_dimensions(self):
  # Static range (row) dimension of each diagonal block.
  return [op.range_dimension for op in self._diagonal_operators]
252
+
253
def _block_domain_dimensions(self):
  # Static domain (column) dimension of each diagonal block.
  return [op.domain_dimension for op in self._diagonal_operators]
255
+
256
def _block_range_dimension_tensors(self):
  # Dynamic (Tensor-valued) range dimension of each diagonal block.
  return [op.range_dimension_tensor() for op in self._diagonal_operators]
258
+
259
def _block_domain_dimension_tensors(self):
  # Dynamic (Tensor-valued) domain dimension of each diagonal block.
  return [op.domain_dimension_tensor() for op in self._diagonal_operators]
261
+
262
def _shape(self):
  # Get final matrix shape.
  # A block-diagonal matrix has [sum of block rows, sum of block columns].
  domain_dimension = sum(self._block_domain_dimensions())
  range_dimension = sum(self._block_range_dimensions())
  matrix_shape = tensor_shape.TensorShape([range_dimension, domain_dimension])

  # Get broadcast batch shape.
  # broadcast_shape checks for compatibility.
  batch_shape = self.operators[0].batch_shape
  for operator in self.operators[1:]:
    batch_shape = common_shapes.broadcast_shape(
        batch_shape, operator.batch_shape)

  return batch_shape.concatenate(matrix_shape)
276
+
277
def _shape_tensor(self):
  # Avoid messy broadcasting if possible.
  # When the static shape is fully known, return it as a constant tensor.
  if self.shape.is_fully_defined():
    return tensor_conversion.convert_to_tensor_v2_with_dispatch(
        self.shape.as_list(), dtype=dtypes.int32, name="shape"
    )

  # Dynamic case: block dimensions sum along each matrix axis.
  domain_dimension = sum(self._block_domain_dimension_tensors())
  range_dimension = sum(self._block_range_dimension_tensors())
  matrix_shape = array_ops_stack.stack([range_dimension, domain_dimension])

  # Dummy Tensor of zeros.  Will never be materialized.
  # Summing zeros of each operator's batch shape broadcasts the shapes
  # together, so `shape(zeros)` is the broadcast batch shape.
  zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
  for operator in self.operators[1:]:
    zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
  batch_shape = array_ops.shape(zeros)

  return array_ops.concat((batch_shape, matrix_shape), 0)
295
+
296
def _linop_adjoint(self) -> "LinearOperatorBlockDiag":
  # We take the adjoint of each block on the diagonal.
  # Adjointing a block-diagonal matrix adjoints each block in place, so the
  # `is_X` hints carry over unchanged.
  return LinearOperatorBlockDiag(
      operators=[operator.adjoint() for operator in self.operators],
      is_non_singular=self.is_non_singular,
      is_self_adjoint=self.is_self_adjoint,
      is_positive_definite=self.is_positive_definite,
      is_square=True)
304
+
305
def _linop_cholesky(self) -> "LinearOperatorBlockDiag":
  # We take the cholesky of each block on the diagonal.
  # The result is marked non-singular: a Cholesky factor of a
  # positive-definite matrix is invertible.
  return LinearOperatorBlockDiag(
      operators=[operator.cholesky() for operator in self.operators],
      is_non_singular=True,
      is_self_adjoint=None,  # Let the operators passed in decide.
      is_square=True)
312
+
313
def _linop_inverse(self) -> "LinearOperatorBlockDiag":
  # We take the inverse of each block on the diagonal.
  # The inverse of a block-diagonal matrix is the block diagonal of the
  # blockwise inverses, so the `is_X` hints carry over unchanged.
  return LinearOperatorBlockDiag(
      operators=[
          operator.inverse() for operator in self.operators],
      is_non_singular=self.is_non_singular,
      is_self_adjoint=self.is_self_adjoint,
      is_positive_definite=self.is_positive_definite,
      is_square=True)
322
+
323
def _linop_matmul(
    self,
    left_operator: "LinearOperatorBlockDiag",
    right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
  # When both operands are block diagonal, multiply corresponding diagonal
  # blocks pairwise.
  # NOTE(review): `zip` assumes both operators have the same number of
  # compatible blocks and silently truncates otherwise -- confirm callers
  # validate the block structure.
  if isinstance(right_operator, LinearOperatorBlockDiag):
    return LinearOperatorBlockDiag(
        operators=[
            o1.matmul(o2) for o1, o2 in zip(
                left_operator.operators, right_operator.operators)],
        is_non_singular=property_hint_util.combined_non_singular_hint(
            left_operator, right_operator),
        # In general, a product of self-adjoint positive-definite
        # block diagonal matrices is not self-adjoint.
        is_self_adjoint=None,
        # In general, a product of positive-definite block diagonal
        # matrices is not positive-definite.
        is_positive_definite=None,
        is_square=True)
  # Anything else: defer to the generic implementation.
  return super()._linop_matmul(left_operator, right_operator)
343
+
344
def _linop_solve(
    self,
    left_operator: "LinearOperatorBlockDiag",
    right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
  # When both operands are block diagonal, solve blockwise: each diagonal
  # block of the left operator against the corresponding block of the right.
  # NOTE(review): as in `_linop_matmul`, `zip` assumes matching block
  # partitions -- confirm callers validate the block structure.
  if isinstance(right_operator, LinearOperatorBlockDiag):
    return LinearOperatorBlockDiag(
        operators=[
            o1.solve(o2) for o1, o2 in zip(
                left_operator.operators, right_operator.operators)],
        is_non_singular=property_hint_util.combined_non_singular_hint(
            left_operator, right_operator),
        # In general, a solve of self-adjoint positive-definite block diagonal
        # matrices is not self-adjoint.
        is_self_adjoint=None,
        # In general, a solve of positive-definite block diagonal matrices is
        # not positive-definite.
        is_positive_definite=None,
        is_square=True)
  # Anything else: defer to the generic implementation.
  return super()._linop_solve(left_operator, right_operator)
364
+
365
+ # TODO(b/188080761): Add a more efficient implementation of `cond` that
366
+ # constructs the condition number from the blockwise singular values.
367
+
368
  def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
    """Transform [batch] matrix `x` with left multiplication: `x --> Ax`.

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    X = ... # shape [..., N, R], batch matrix, R > 0.

    Y = operator.matmul(X)
    Y.shape
    ==> [..., M, R]

    Y[..., :, r] = sum_j A[..., :, j] X[j, r]
    ```

    Args:
      x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as
        `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See
        class docstring for definition of shape compatibility.
      adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
      adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
        the hermitian transpose (transposition and complex conjugation).
      name: A name for this `Op`.

    Returns:
      A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
        as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that
        concatenate to `[..., M, R]`.
    """
    def _check_operators_agree(r, l, message):
      # Static-shape compatibility check; skipped when either dim is unknown.
      if (r.range_dimension is not None and
          l.domain_dimension is not None and
          r.range_dimension != l.domain_dimension):
        raise ValueError(message)

    if isinstance(x, linear_operator.LinearOperator):
      # Operator-operator product: fold the adjoint flags into the operands
      # so the downstream code can ignore them.
      left_operator = self.adjoint() if adjoint else self
      right_operator = x.adjoint() if adjoint_arg else x

      _check_operators_agree(
          right_operator, left_operator,
          "Operators are incompatible. Expected `x` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))

      # We can efficiently multiply BlockDiag LinearOperators if the number of
      # blocks agree.
      # NOTE(review): this tests `x` rather than `right_operator`; when
      # `adjoint_arg=True` they may differ in type — confirm intentional.
      if isinstance(x, LinearOperatorBlockDiag):
        if len(left_operator.operators) != len(right_operator.operators):
          raise ValueError(
              "Can not efficiently multiply two `LinearOperatorBlockDiag`s "
              "together when number of blocks differ.")

        for o1, o2 in zip(left_operator.operators, right_operator.operators):
          _check_operators_agree(
              o2, o1,
              "Blocks are incompatible. Expected `x` to have dimension"
              " {} but got {}.".format(
                  o1.domain_dimension, o2.range_dimension))

      with self._name_scope(name):  # pylint: disable=not-callable
        return self._linop_matmul(left_operator, right_operator)

    # Tensor or blockwise-iterable argument.
    with self._name_scope(name):  # pylint: disable=not-callable
      arg_dim = -1 if adjoint_arg else -2
      block_dimensions = (self._block_range_dimensions() if adjoint
                          else self._block_domain_dimensions())
      if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):
        # Convert and validate each block in place.
        for i, block in enumerate(x):
          if not isinstance(block, linear_operator.LinearOperator):
            block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
            self._check_input_dtype(block)
            block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
            x[i] = block
      else:
        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
        self._check_input_dtype(x)
        op_dimension = (self.range_dimension if adjoint
                        else self.domain_dimension)
        op_dimension.assert_is_compatible_with(x.shape[arg_dim])
      return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
451
+
452
+ def _matmul(self, x, adjoint=False, adjoint_arg=False):
453
+ arg_dim = -1 if adjoint_arg else -2
454
+ block_dimensions = (self._block_range_dimensions() if adjoint
455
+ else self._block_domain_dimensions())
456
+ block_dimensions_fn = (
457
+ self._block_range_dimension_tensors if adjoint
458
+ else self._block_domain_dimension_tensors)
459
+ blockwise_arg = linear_operator_util.arg_is_blockwise(
460
+ block_dimensions, x, arg_dim)
461
+ if blockwise_arg:
462
+ split_x = x
463
+
464
+ else:
465
+ split_dim = -1 if adjoint_arg else -2
466
+ # Split input by rows normally, and otherwise columns.
467
+ split_x = linear_operator_util.split_arg_into_blocks(
468
+ block_dimensions, block_dimensions_fn, x, axis=split_dim)
469
+
470
+ result_list = []
471
+ for index, operator in enumerate(self.operators):
472
+ result_list += [operator.matmul(
473
+ split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
474
+
475
+ if blockwise_arg:
476
+ return result_list
477
+
478
+ result_list = linear_operator_util.broadcast_matrix_batch_dims(
479
+ result_list)
480
+ return array_ops.concat(result_list, axis=-2)
481
+
482
  def matvec(self, x, adjoint=False, name="matvec"):
    """Transform [batch] vector `x` with left multiplication: `x --> Ax`.

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)

    X = ... # shape [..., N], batch vector

    Y = operator.matvec(X)
    Y.shape
    ==> [..., M]

    Y[..., :] = sum_j A[..., :, j] X[..., j]
    ```

    Args:
      x: `Tensor` with compatible shape and same `dtype` as `self`, or an
        iterable of `Tensor`s (for blockwise operators). `Tensor`s are treated
        as [batch] vectors, meaning for every set of leading dimensions, the
        last dimension defines a vector.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
      name: A name for this `Op`.

    Returns:
      A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      block_dimensions = (self._block_range_dimensions() if adjoint
                          else self._block_domain_dimensions())
      if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
        # Validate and convert each block in place, then delegate to `matmul`
        # by promoting every vector block to a single-column matrix.
        for i, block in enumerate(x):
          if not isinstance(block, linear_operator.LinearOperator):
            block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
            self._check_input_dtype(block)
            block_dimensions[i].assert_is_compatible_with(block.shape[-1])
            x[i] = block
        x_mat = [block[..., array_ops.newaxis] for block in x]
        y_mat = self.matmul(x_mat, adjoint=adjoint)
        return [array_ops.squeeze(y, axis=-1) for y in y_mat]

      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)
      op_dimension = (self.range_dimension if adjoint
                      else self.domain_dimension)
      op_dimension.assert_is_compatible_with(x.shape[-1])
      # Promote to a one-column matrix, matmul, then drop the column axis.
      x_mat = x[..., array_ops.newaxis]
      y_mat = self.matmul(x_mat, adjoint=adjoint)
      return array_ops.squeeze(y_mat, axis=-1)
532
+
533
+ def _determinant(self):
534
+ result = self.operators[0].determinant()
535
+ for operator in self.operators[1:]:
536
+ result *= operator.determinant()
537
+ return result
538
+
539
+ def _log_abs_determinant(self):
540
+ result = self.operators[0].log_abs_determinant()
541
+ for operator in self.operators[1:]:
542
+ result += operator.log_abs_determinant()
543
+ return result
544
+
545
  def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ... # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape,
        or a list of `Tensor`s (for blockwise operators). `Tensor`s are treated
        like a [batch] matrices meaning for every set of leading dimensions, the
        last two dimensions defines a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`. If `True`, solve the system involving the adjoint
        of this `LinearOperator`: `A^H X = rhs`.
      adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is False.
    """
    # `is False` (not falsiness) so that the `None` hint means "unknown".
    if self.is_non_singular is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "be singular.")
    if self.is_square is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "not be square.")

    def _check_operators_agree(r, l, message):
      # Static-shape compatibility check; skipped when either dim is unknown.
      if (r.range_dimension is not None and
          l.domain_dimension is not None and
          r.range_dimension != l.domain_dimension):
        raise ValueError(message)

    if isinstance(rhs, linear_operator.LinearOperator):
      # Operator-operator solve: fold the adjoint flags into the operands.
      left_operator = self.adjoint() if adjoint else self
      right_operator = rhs.adjoint() if adjoint_arg else rhs

      _check_operators_agree(
          right_operator, left_operator,
          "Operators are incompatible. Expected `x` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))

      # We can efficiently solve BlockDiag LinearOperators if the number of
      # blocks agree.
      if isinstance(right_operator, LinearOperatorBlockDiag):
        if len(left_operator.operators) != len(right_operator.operators):
          raise ValueError(
              "Can not efficiently solve `LinearOperatorBlockDiag` when "
              "number of blocks differ.")

        for o1, o2 in zip(left_operator.operators, right_operator.operators):
          _check_operators_agree(
              o2, o1,
              "Blocks are incompatible. Expected `x` to have dimension"
              " {} but got {}.".format(
                  o1.domain_dimension, o2.range_dimension))

      with self._name_scope(name):  # pylint: disable=not-callable
        return self._linop_solve(left_operator, right_operator)

    # Tensor or blockwise-iterable right-hand side.
    with self._name_scope(name):  # pylint: disable=not-callable
      block_dimensions = (self._block_domain_dimensions() if adjoint
                          else self._block_range_dimensions())
      arg_dim = -1 if adjoint_arg else -2
      blockwise_arg = linear_operator_util.arg_is_blockwise(
          block_dimensions, rhs, arg_dim)

      if blockwise_arg:
        # Validate and convert each provided block in place.
        split_rhs = rhs
        for i, block in enumerate(split_rhs):
          if not isinstance(block, linear_operator.LinearOperator):
            block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
            self._check_input_dtype(block)
            block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
            split_rhs[i] = block
      else:
        rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
            rhs, name="rhs"
        )
        self._check_input_dtype(rhs)
        op_dimension = (self.domain_dimension if adjoint
                        else self.range_dimension)
        op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])
        split_dim = -1 if adjoint_arg else -2
        # Split input by rows normally, and otherwise columns.
        # NOTE(review): splits by domain dims regardless of `adjoint`; this
        # relies on the operator being square (checked above) — confirm.
        split_rhs = linear_operator_util.split_arg_into_blocks(
            self._block_domain_dimensions(),
            self._block_domain_dimension_tensors,
            rhs, axis=split_dim)

      # Solve each block against its slice of the right-hand side.
      solution_list = []
      for index, operator in enumerate(self.operators):
        solution_list += [operator.solve(
            split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]

      if blockwise_arg:
        return solution_list

      solution_list = linear_operator_util.broadcast_matrix_batch_dims(
          solution_list)
      return array_ops.concat(solution_list, axis=-2)
671
+
672
  def solvevec(self, rhs, adjoint=False, name="solve"):
    # NOTE(review): default `name` is "solve" (not "solvevec"); this mirrors
    # the base-class convention — confirm intentional before changing.
    """Solve single equation with best effort: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve one linear system for every member of the batch.
    RHS = ... # shape [..., M]

    X = operator.solvevec(RHS)
    # X is the solution to the linear system
    # sum_j A[..., :, j] X[..., j] = RHS[..., :]

    operator.matvec(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s
        (for blockwise operators). `Tensor`s are treated as [batch] vectors,
        meaning for every set of leading dimensions, the last dimension defines
        a vector. See class docstring for definition of compatibility regarding
        batch dimensions.
      adjoint: Python `bool`. If `True`, solve the system involving the adjoint
        of this `LinearOperator`: `A^H X = rhs`.
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is False.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      block_dimensions = (self._block_domain_dimensions() if adjoint
                          else self._block_range_dimensions())
      if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):
        # Validate each block, then delegate to `solve` by promoting every
        # vector block to a single-column matrix.
        for i, block in enumerate(rhs):
          if not isinstance(block, linear_operator.LinearOperator):
            block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
            self._check_input_dtype(block)
            block_dimensions[i].assert_is_compatible_with(block.shape[-1])
            rhs[i] = block
        rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]
        solution_mat = self.solve(rhs_mat, adjoint=adjoint)
        return [array_ops.squeeze(x, axis=-1) for x in solution_mat]

      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)
      op_dimension = (self.domain_dimension if adjoint
                      else self.range_dimension)
      op_dimension.assert_is_compatible_with(rhs.shape[-1])
      # Promote to a one-column matrix, solve, then drop the column axis.
      rhs_mat = array_ops.expand_dims(rhs, axis=-1)
      solution_mat = self.solve(rhs_mat, adjoint=adjoint)
      return array_ops.squeeze(solution_mat, axis=-1)
736
+
737
+ def _diag_part(self):
738
+ if not all(operator.is_square for operator in self.operators):
739
+ raise NotImplementedError(
740
+ "`diag_part` not implemented for an operator whose blocks are not "
741
+ "square.")
742
+ diag_list = []
743
+ for operator in self.operators:
744
+ # Extend the axis for broadcasting.
745
+ diag_list += [operator.diag_part()[..., array_ops.newaxis]]
746
+ diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list)
747
+ diagonal = array_ops.concat(diag_list, axis=-2)
748
+ return array_ops.squeeze(diagonal, axis=-1)
749
+
750
+ def _trace(self):
751
+ if not all(operator.is_square for operator in self.operators):
752
+ raise NotImplementedError(
753
+ "`trace` not implemented for an operator whose blocks are not "
754
+ "square.")
755
+ result = self.operators[0].trace()
756
+ for operator in self.operators[1:]:
757
+ result += operator.trace()
758
+ return result
759
+
760
  def _to_dense(self):
    """Materialize the block-diagonal operator as one dense [batch] matrix."""
    # Running count of columns already emitted; determines the left padding
    # of each block row. Must be updated between the before/after paddings.
    num_cols = 0
    rows = []
    broadcasted_blocks = [operator.to_dense() for operator in self.operators]
    broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims(
        broadcasted_blocks)
    for block in broadcasted_blocks:
      # Batch dims plus the row count of this block (all but the last axis).
      batch_row_shape = array_ops.shape(block)[:-1]

      # Zeros to the left of the block: one column per previously placed col.
      zeros_to_pad_before_shape = array_ops.concat(
          [batch_row_shape, [num_cols]], axis=-1)
      zeros_to_pad_before = array_ops.zeros(
          shape=zeros_to_pad_before_shape, dtype=block.dtype)
      num_cols += array_ops.shape(block)[-1]
      # Zeros to the right: whatever remains of the total domain dimension.
      zeros_to_pad_after_shape = array_ops.concat(
          [batch_row_shape,
           [self.domain_dimension_tensor() - num_cols]], axis=-1)
      zeros_to_pad_after = array_ops.zeros(
          shape=zeros_to_pad_after_shape, dtype=block.dtype)

      rows.append(array_ops.concat(
          [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))

    # Stack the padded block rows vertically and restore static shape info.
    mat = array_ops.concat(rows, axis=-2)
    mat.set_shape(self.shape)
    return mat
786
+
787
+ def _assert_non_singular(self):
788
+ return control_flow_ops.group([
789
+ operator.assert_non_singular() for operator in self.operators])
790
+
791
+ def _assert_self_adjoint(self):
792
+ return control_flow_ops.group([
793
+ operator.assert_self_adjoint() for operator in self.operators])
794
+
795
+ def _assert_positive_definite(self):
796
+ return control_flow_ops.group([
797
+ operator.assert_positive_definite() for operator in self.operators])
798
+
799
+ def _eigvals(self):
800
+ if not all(operator.is_square for operator in self.operators):
801
+ raise NotImplementedError(
802
+ "`eigvals` not implemented for an operator whose blocks are not "
803
+ "square.")
804
+ eig_list = []
805
+ for operator in self.operators:
806
+ # Extend the axis for broadcasting.
807
+ eig_list += [operator.eigvals()[..., array_ops.newaxis]]
808
+ eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list)
809
+ eigs = array_ops.concat(eig_list, axis=-2)
810
+ return array_ops.squeeze(eigs, axis=-1)
811
+
812
  @property
  def _composite_tensor_fields(self):
    # Constructor fields used to flatten/unflatten this operator as a
    # CompositeTensor; only `operators` parameterizes it.
    return ("operators",)
815
+
816
  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    # One entry per block operator; 0 means each block's parameter batch
    # dims map directly onto this operator's matrix batch dims.
    return {"operators": [0] * len(self.operators)}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py ADDED
@@ -0,0 +1,986 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Create a blockwise lower-triangular operator from `LinearOperators`."""
16
+
17
+ from tensorflow.python.framework import common_shapes
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_conversion
21
+ from tensorflow.python.framework import tensor_shape
22
+ from tensorflow.python.ops import array_ops
23
+ from tensorflow.python.ops import array_ops_stack
24
+ from tensorflow.python.ops import check_ops
25
+ from tensorflow.python.ops import control_flow_ops
26
+ from tensorflow.python.ops import math_ops
27
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
28
+ from tensorflow.python.ops.linalg import linear_operator
29
+ from tensorflow.python.ops.linalg import linear_operator_addition
30
+ from tensorflow.python.ops.linalg import linear_operator_full_matrix
31
+ from tensorflow.python.ops.linalg import linear_operator_identity
32
+ from tensorflow.python.ops.linalg import linear_operator_util
33
+ from tensorflow.python.util import nest
34
+ from tensorflow.python.util.tf_export import tf_export
35
+
36
+ __all__ = ["LinearOperatorBlockLowerTriangular"]
37
+
38
+
39
+ @tf_export("linalg.LinearOperatorBlockLowerTriangular")
40
+ @linear_operator.make_composite_tensor
41
+ class LinearOperatorBlockLowerTriangular(linear_operator.LinearOperator):
42
+ """Combines `LinearOperators` into a blockwise lower-triangular matrix.
43
+
44
+ This operator is initialized with a nested list of linear operators, which
45
+ are combined into a new `LinearOperator` whose underlying matrix
46
+ representation is square and has each operator on or below the main diagonal,
47
+ and zero's elsewhere. Each element of the outer list is a list of
48
+ `LinearOperators` corresponding to a row-partition of the blockwise structure.
49
+ The number of `LinearOperator`s in row-partion `i` must be equal to `i`.
50
+
51
+ For example, a blockwise `3 x 3` `LinearOperatorBlockLowerTriangular` is
52
+ initialized with the list `[[op_00], [op_10, op_11], [op_20, op_21, op_22]]`,
53
+ where the `op_ij`, `i < 3, j <= i`, are `LinearOperator` instances. The
54
+ `LinearOperatorBlockLowerTriangular` behaves as the following blockwise
55
+ matrix, where `0` represents appropriately-sized [batch] matrices of zeros:
56
+
57
+ ```none
58
+ [[op_00, 0, 0],
59
+ [op_10, op_11, 0],
60
+ [op_20, op_21, op_22]]
61
+ ```
62
+
63
+ Each `op_jj` on the diagonal is required to represent a square matrix, and
64
+ hence will have shape `batch_shape_j + [M_j, M_j]`. `LinearOperator`s in row
65
+ `j` of the blockwise structure must have `range_dimension` equal to that of
66
+ `op_jj`, and `LinearOperators` in column `j` must have `domain_dimension`
67
+ equal to that of `op_jj`.
68
+
69
+ If each `op_jj` on the diagonal has shape `batch_shape_j + [M_j, M_j]`, then
70
+ the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`,
71
+ where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
72
+ `j = 0, 1, ..., J`, assuming the intermediate batch shapes broadcast.
73
+ Even if the combined shape is well defined, the combined operator's
74
+ methods may fail due to lack of broadcasting ability in the defining
75
+ operators' methods.
76
+
77
+ For example, to create a 4 x 4 linear operator combined of three 2 x 2
78
+ operators:
79
+ >>> operator_0 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
80
+ >>> operator_1 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
81
+ >>> operator_2 = tf.linalg.LinearOperatorLowerTriangular([[5., 6.], [7., 8]])
82
+ >>> operator = LinearOperatorBlockLowerTriangular(
83
+ ... [[operator_0], [operator_1, operator_2]])
84
+
85
+ >>> operator.to_dense()
86
+ <tf.Tensor: shape=(4, 4), dtype=float32, numpy=
87
+ array([[1., 2., 0., 0.],
88
+ [3., 4., 0., 0.],
89
+ [1., 0., 5., 0.],
90
+ [0., 1., 7., 8.]], dtype=float32)>
91
+
92
+ >>> operator.shape
93
+ TensorShape([4, 4])
94
+
95
+ >>> operator.log_abs_determinant()
96
+ <tf.Tensor: shape=(), dtype=float32, numpy=4.3820267>
97
+
98
+ >>> x0 = [[1., 6.], [-3., 4.]]
99
+ >>> x1 = [[0., 2.], [4., 0.]]
100
+ >>> x = tf.concat([x0, x1], 0) # Shape [2, 4] Tensor
101
+ >>> operator.matmul(x)
102
+ <tf.Tensor: shape=(4, 2), dtype=float32, numpy=
103
+ array([[-5., 14.],
104
+ [-9., 34.],
105
+ [ 1., 16.],
106
+ [29., 18.]], dtype=float32)>
107
+
108
+ The above `matmul` is equivalent to:
109
+ >>> tf.concat([operator_0.matmul(x0),
110
+ ... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0)
111
+ <tf.Tensor: shape=(4, 2), dtype=float32, numpy=
112
+ array([[-5., 14.],
113
+ [-9., 34.],
114
+ [ 1., 16.],
115
+ [29., 18.]], dtype=float32)>
116
+
117
+ #### Shape compatibility
118
+
119
+ This operator acts on [batch] matrix with compatible shape.
120
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if
121
+
122
+ ```
123
+ operator.shape = [B1,...,Bb] + [M, N], with b >= 0
124
+ x.shape = [B1,...,Bb] + [N, R], with R >= 0.
125
+ ```
126
+
127
+ For example:
128
+
129
+ Create a [2, 3] batch of 4 x 4 linear operators:
130
+ >>> matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
131
+ >>> operator_44 = tf.linalg.LinearOperatorFullMatrix(matrix_44)
132
+
133
+ Create a [1, 3] batch of 5 x 4 linear operators:
134
+ >>> matrix_54 = tf.random.normal(shape=[1, 3, 5, 4])
135
+ >>> operator_54 = tf.linalg.LinearOperatorFullMatrix(matrix_54)
136
+
137
+ Create a [1, 3] batch of 5 x 5 linear operators:
138
+ >>> matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
139
+ >>> operator_55 = tf.linalg.LinearOperatorFullMatrix(matrix_55)
140
+
141
+ Combine to create a [2, 3] batch of 9 x 9 operators:
142
+ >>> operator_99 = LinearOperatorBlockLowerTriangular(
143
+ ... [[operator_44], [operator_54, operator_55]])
144
+ >>> operator_99.shape
145
+ TensorShape([2, 3, 9, 9])
146
+
147
+ Create a shape [2, 1, 9] batch of vectors and apply the operator to it.
148
+ >>> x = tf.random.normal(shape=[2, 1, 9])
149
+ >>> y = operator_99.matvec(x)
150
+ >>> y.shape
151
+ TensorShape([2, 3, 9])
152
+
153
+ Create a blockwise list of vectors and apply the operator to it. A blockwise
154
+ list is returned.
155
+ >>> x4 = tf.random.normal(shape=[2, 1, 4])
156
+ >>> x5 = tf.random.normal(shape=[2, 3, 5])
157
+ >>> y_blockwise = operator_99.matvec([x4, x5])
158
+ >>> y_blockwise[0].shape
159
+ TensorShape([2, 3, 4])
160
+ >>> y_blockwise[1].shape
161
+ TensorShape([2, 3, 5])
162
+
163
+ #### Performance
164
+
165
+ Suppose `operator` is a `LinearOperatorBlockLowerTriangular` consisting of `D`
166
+ row-partitions and `D` column-partitions, such that the total number of
167
+ operators is `N = D * (D + 1) // 2`.
168
+
169
+ * `operator.matmul` has complexity equal to the sum of the `matmul`
170
+ complexities of the individual operators.
171
+ * `operator.solve` has complexity equal to the sum of the `solve` complexities
172
+ of the operators on the diagonal and the `matmul` complexities of the
173
+ operators off the diagonal.
174
+ * `operator.determinant` has complexity equal to the sum of the `determinant`
175
+ complexities of the operators on the diagonal.
176
+
177
+ #### Matrix property hints
178
+
179
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
180
+ for `X = non_singular, self_adjoint, positive_definite, square`.
181
+ These have the following meaning:
182
+
183
+ * If `is_X == True`, callers should expect the operator to have the
184
+ property `X`. This is a promise that should be fulfilled, but is *not* a
185
+ runtime assert. For example, finite floating point precision may result
186
+ in these promises being violated.
187
+ * If `is_X == False`, callers should expect the operator to not have `X`.
188
+ * If `is_X == None` (the default), callers should have no expectation either
189
+ way.
190
+ """
191
+
192
+ def __init__(self,
193
+ operators,
194
+ is_non_singular=None,
195
+ is_self_adjoint=None,
196
+ is_positive_definite=None,
197
+ is_square=None,
198
+ name="LinearOperatorBlockLowerTriangular"):
199
+ r"""Initialize a `LinearOperatorBlockLowerTriangular`.
200
+
201
+ `LinearOperatorBlockLowerTriangular` is initialized with a list of lists of
202
+ operators `[[op_0], [op_1, op_2], [op_3, op_4, op_5],...]`.
203
+
204
+ Args:
205
+ operators: Iterable of iterables of `LinearOperator` objects, each with
206
+ the same `dtype`. Each element of `operators` corresponds to a row-
207
+ partition, in top-to-bottom order. The operators in each row-partition
208
+ are filled in left-to-right. For example,
209
+ `operators = [[op_0], [op_1, op_2], [op_3, op_4, op_5]]` creates a
210
+ `LinearOperatorBlockLowerTriangular` with full block structure
211
+ `[[op_0, 0, 0], [op_1, op_2, 0], [op_3, op_4, op_5]]`. The number of
212
+ operators in the `i`th row must be equal to `i`, such that each operator
213
+ falls on or below the diagonal of the blockwise structure.
214
+ `LinearOperator`s that fall on the diagonal (the last elements of each
215
+ row) must be square. The other `LinearOperator`s must have domain
216
+ dimension equal to the domain dimension of the `LinearOperator`s in the
217
+ same column-partition, and range dimension equal to the range dimension
218
+ of the `LinearOperator`s in the same row-partition.
219
+ is_non_singular: Expect that this operator is non-singular.
220
+ is_self_adjoint: Expect that this operator is equal to its hermitian
221
+ transpose.
222
+ is_positive_definite: Expect that this operator is positive definite,
223
+ meaning the quadratic form `x^H A x` has positive real part for all
224
+ nonzero `x`. Note that we do not require the operator to be
225
+ self-adjoint to be positive-definite. See:
226
+ https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
227
+ is_square: Expect that this operator acts like square [batch] matrices.
228
+ This will raise a `ValueError` if set to `False`.
229
+ name: A name for this `LinearOperator`.
230
+
231
+ Raises:
232
+ TypeError: If all operators do not have the same `dtype`.
233
+ ValueError: If `operators` is empty, contains an erroneous number of
234
+ elements, or contains operators with incompatible shapes.
235
+ """
236
+ parameters = dict(
237
+ operators=operators,
238
+ is_non_singular=is_non_singular,
239
+ is_self_adjoint=is_self_adjoint,
240
+ is_positive_definite=is_positive_definite,
241
+ is_square=is_square,
242
+ name=name
243
+ )
244
+
245
+ # Validate operators.
246
+ check_ops.assert_proper_iterable(operators)
247
+ for row in operators:
248
+ check_ops.assert_proper_iterable(row)
249
+ operators = [list(row) for row in operators]
250
+
251
+ if not operators:
252
+ raise ValueError(f"Argument `operators` must be a list of >=1 operators. "
253
+ f"Received: {operators}.")
254
+ self._operators = operators
255
+ self._diagonal_operators = [row[-1] for row in operators]
256
+
257
+ dtype = operators[0][0].dtype
258
+ self._validate_dtype(dtype)
259
+ is_non_singular = self._validate_non_singular(is_non_singular)
260
+ self._validate_num_operators()
261
+ self._validate_operator_dimensions()
262
+ is_square = self._validate_square(is_square)
263
+ with ops.name_scope(name):
264
+ super(LinearOperatorBlockLowerTriangular, self).__init__(
265
+ dtype=dtype,
266
+ is_non_singular=is_non_singular,
267
+ is_self_adjoint=is_self_adjoint,
268
+ is_positive_definite=is_positive_definite,
269
+ is_square=is_square,
270
+ parameters=parameters,
271
+ name=name)
272
+
273
+ def _validate_num_operators(self):
274
+ for i, row in enumerate(self.operators):
275
+ if len(row) != i + 1:
276
+ raise ValueError(
277
+ f"Argument `operators[{i}]` must contain `{i + 1}` blocks. "
278
+ f"Received: {len(row)} blocks.")
279
+
280
def _validate_operator_dimensions(self):
  """Check that `operators` have compatible dimensions.

  Only the strictly-lower (off-diagonal) blocks are cross-checked here:
  each must share its `domain_dimension` with the block directly above it
  and its `range_dimension` with the block to its right. Diagonal blocks
  are validated separately (they must be square).
  """
  for i in range(1, len(self.operators)):
    for j in range(i):
      op = self.operators[i][j]

      # `above_op` is the operator directly above `op` in the blockwise
      # structure, in row partition `i-1`, column partition `j`. `op` should
      # have the same `domain_dimension` as `above_op`.
      above_op = self.operators[i - 1][j]

      # `right_op` is the operator to the right of `op` in the blockwise
      # structure, in row partition `i`, column partition `j+1`. `op` should
      # have the same `range_dimension` as `right_op`.
      right_op = self.operators[i][j + 1]

      # Dimensions may be statically unknown (None); only compare when both
      # sides are known.
      if (op.domain_dimension is not None and
          above_op.domain_dimension is not None):
        if op.domain_dimension != above_op.domain_dimension:
          raise ValueError(f"Argument `operators[{i}][{j}].domain_dimension` "
                           f"({op.domain_dimension}) must be the same as "
                           f"`operators[{i-1}][{j}].domain_dimension` "
                           f"({above_op.domain_dimension}).")
      if (op.range_dimension is not None and
          right_op.range_dimension is not None):
        if op.range_dimension != right_op.range_dimension:
          raise ValueError(f"Argument `operators[{i}][{j}].range_dimension` "
                           f"({op.range_dimension}) must be the same as "
                           f"`operators[{i}][{j + 1}].range_dimension` "
                           f"({right_op.range_dimension}).")
+ # pylint: disable=g-bool-id-comparison
312
+ def _validate_non_singular(self, is_non_singular):
313
+ if all(op.is_non_singular for op in self._diagonal_operators):
314
+ if is_non_singular is False:
315
+ raise ValueError(
316
+ f"A blockwise lower-triangular operator with non-singular "
317
+ f"operators on the main diagonal is always non-singular. "
318
+ f"Expected argument `is_non_singular` to be True. "
319
+ f"Received: {is_non_singular}.")
320
+ return True
321
+ if any(op.is_non_singular is False for op in self._diagonal_operators):
322
+ if is_non_singular is True:
323
+ raise ValueError(
324
+ f"A blockwise lower-triangular operator with a singular operator "
325
+ f"on the main diagonal is always singular. Expected argument "
326
+ f"`is_non_singular` to be True. Received: {is_non_singular}.")
327
+ return False
328
+
329
+ def _validate_square(self, is_square):
330
+ if is_square is False:
331
+ raise ValueError(f"`LinearOperatorBlockLowerTriangular` must be square. "
332
+ f"Expected argument `is_square` to be True. "
333
+ f"Received: {is_square}.")
334
+ for i, op in enumerate(self._diagonal_operators):
335
+ if op.is_square is False:
336
+ raise ValueError(
337
+ f"Matrices on the diagonal (the final elements of each "
338
+ f"row-partition in the `operators` list) must be square. Expected "
339
+ f"argument `operators[{i}][-1].is_square` to be True. "
340
+ f"Received: {op.is_square}.")
341
+ return True
342
+ # pylint: enable=g-bool-id-comparison
343
+
344
+ def _validate_dtype(self, dtype):
345
+ for i, row in enumerate(self.operators):
346
+ for operator in row:
347
+ if operator.dtype != dtype:
348
+ name_type = (str((o.name, o.dtype)) for o in row)
349
+ raise TypeError(
350
+ "Expected all operators to have the same dtype. Found {} in row "
351
+ "{} and {} in row 0.".format(name_type, i, str(dtype)))
352
+
353
@property
def operators(self):
  """The list of lists of `LinearOperator` blocks, in row-partition order."""
  return self._operators
def _block_range_dimensions(self):
  """Static range (row-count) dimension of each diagonal block."""
  return [op.range_dimension for op in self._diagonal_operators]
def _block_domain_dimensions(self):
  """Static domain (column-count) dimension of each diagonal block."""
  return [op.domain_dimension for op in self._diagonal_operators]
def _block_range_dimension_tensors(self):
  """Dynamic (tensor-valued) range dimension of each diagonal block."""
  return [op.range_dimension_tensor() for op in self._diagonal_operators]
def _block_domain_dimension_tensors(self):
  """Dynamic (tensor-valued) domain dimension of each diagonal block."""
  return [op.domain_dimension_tensor() for op in self._diagonal_operators]
def _shape(self):
  """Static `TensorShape`: broadcast batch dims + [total rows, total cols]."""
  # The overall matrix size is the sum of the diagonal blocks' sizes.
  # (The operator is square, so the domain and range totals coincide.)
  matrix_shape = tensor_shape.TensorShape(
      [sum(self._block_domain_dimensions()),
       sum(self._block_range_dimensions())])

  # Fold every block's batch shape together; `broadcast_shape` raises if any
  # pair is incompatible. Row 0 holds exactly one block, so starting the fold
  # at operators[0][0] and iterating the remaining rows visits every block.
  flat_operators = [op for row in self.operators for op in row]
  batch_shape = flat_operators[0].batch_shape
  for operator in flat_operators[1:]:
    batch_shape = common_shapes.broadcast_shape(
        batch_shape, operator.batch_shape)

  return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
  """Dynamic shape tensor: broadcast batch dims + [total rows, total cols]."""
  # Avoid messy broadcasting if possible: when the static shape is fully
  # known, just return it as a constant tensor.
  if self.shape.is_fully_defined():
    return tensor_conversion.convert_to_tensor_v2_with_dispatch(
        self.shape.as_list(), dtype=dtypes.int32, name="shape"
    )

  domain_dimension = sum(self._block_domain_dimension_tensors())
  range_dimension = sum(self._block_range_dimension_tensors())
  matrix_shape = array_ops_stack.stack([domain_dimension, range_dimension])

  # Fold every block's dynamic batch shape together.
  batch_shape = self.operators[0][0].batch_shape_tensor()
  for row in self.operators[1:]:
    for operator in row:
      batch_shape = array_ops.broadcast_dynamic_shape(
          batch_shape, operator.batch_shape_tensor())

  return array_ops.concat((batch_shape, matrix_shape), 0)
def _linop_inverse(self) -> "LinearOperatorBlockLowerTriangular":
  """Inverse of LinearOperatorBlockLowerTriangular.

  We recursively apply the identity:

  ```none
  |A 0|' = | A' 0|
  |B C| |-C'BA' C'|
  ```

  where `A` is n-by-n, `B` is m-by-n,
  `C` is m-by-m, and `'` denotes inverse.

  This identity can be verified through multiplication:

  ```none
  |A 0|| A' 0|
  |B C||-C'BA' C'|

  = | AA' 0|
  |BA'-CC'BA' CC'|

  = |I 0|
  |0 I|
  ```

  Returns:
    A 'LinearOperatorBlockLowerTriangular'.
  """
  # Base case: a single diagonal block — invert it directly.
  if len(self.operators) == 1:
    return (LinearOperatorBlockLowerTriangular(
        [[self.operators[0][0].inverse()]],
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=(self.is_positive_definite),
        is_square=True))

  blockwise_dim = len(self.operators)

  # Calculate the inverse of the `LinearOperatorBlockLowerTriangular`
  # representing all but the last row of `self` with
  # a recursive call (the matrix `A'` in the docstring definition).
  upper_left_inverse = (
      LinearOperatorBlockLowerTriangular(self.operators[:-1]).inverse())

  bottom_row = self.operators[-1]
  bottom_right_inverse = bottom_row[-1].inverse()

  # Find the bottom row of the inverse (equal to `[-C'BA', C']`
  # in the docstring definition, where `C` is the bottom-right operator of
  # `self` and `B` is the set of operators in the
  # bottom row excluding `C`). To find `-C'BA'`, we first iterate over the
  # column partitions of `A'`.
  inverse_bottom_row = []
  for i in range(blockwise_dim - 1):
    # Find the `i`-th block of `BA'`.
    blocks = []
    for j in range(i, blockwise_dim - 1):
      result = bottom_row[j].matmul(upper_left_inverse.operators[j][i])
      # `add_operators` below only handles a fixed set of operator types;
      # densify anything else so the sum can proceed.
      if not any(
          isinstance(result, op_type)
          for op_type in linear_operator_addition.SUPPORTED_OPERATORS
      ):
        result = linear_operator_full_matrix.LinearOperatorFullMatrix(
            result.to_dense())
      blocks.append(result)

    summed_blocks = linear_operator_addition.add_operators(blocks)
    assert len(summed_blocks) == 1
    block = summed_blocks[0]

    # Find the `i`-th block of `-C'BA'`.
    block = bottom_right_inverse.matmul(block)
    # Negate via multiplication by -1 * identity, staying in operator form.
    block = linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=bottom_right_inverse.domain_dimension_tensor(),
        multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block)
    inverse_bottom_row.append(block)

  # `C'` is the last block of the inverted linear operator.
  inverse_bottom_row.append(bottom_right_inverse)

  return (LinearOperatorBlockLowerTriangular(
      upper_left_inverse.operators + [inverse_bottom_row],
      is_non_singular=self.is_non_singular,
      is_self_adjoint=self.is_self_adjoint,
      is_positive_definite=(self.is_positive_definite),
      is_square=True))
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
  """Transform [batch] matrix `x` with left multiplication: `x --> Ax`.

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  X = ... # shape [..., N, R], batch matrix, R > 0.

  Y = operator.matmul(X)
  Y.shape
  ==> [..., M, R]

  Y[..., :, r] = sum_j A[..., :, j] X[j, r]
  ```

  Args:
    x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as
      `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See
      class docstring for definition of shape compatibility.
    adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
    adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
      the hermitian transpose (transposition and complex conjugation).
    name: A name for this `Op`.

  Returns:
    A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
    as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that
    concatenate to `[..., M, R]`.
  """
  # Operator x operator: delegate to the operator-composition path.
  if isinstance(x, linear_operator.LinearOperator):
    left_operator = self.adjoint() if adjoint else self
    right_operator = x.adjoint() if adjoint_arg else x

    if (right_operator.range_dimension is not None and
        left_operator.domain_dimension is not None and
        right_operator.range_dimension != left_operator.domain_dimension):
      raise ValueError(
          "Operators are incompatible. Expected `x` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_matmul(left_operator, right_operator)

  with self._name_scope(name):  # pylint: disable=not-callable
    arg_dim = -1 if adjoint_arg else -2
    block_dimensions = (self._block_range_dimensions() if adjoint
                        else self._block_domain_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):
      # Blockwise input: validate/convert each block in place.
      for i, block in enumerate(x):
        if not isinstance(block, linear_operator.LinearOperator):
          block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
          x[i] = block
    else:
      # Dense input: validate the single tensor against the full dimension.
      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)
      op_dimension = (self.range_dimension if adjoint
                      else self.domain_dimension)
      op_dimension.assert_is_compatible_with(x.shape[arg_dim])
    return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  """Blockwise matmul implementation; `x` may be dense or already blockwise."""
  arg_dim = -1 if adjoint_arg else -2
  block_dimensions = (self._block_range_dimensions() if adjoint
                      else self._block_domain_dimensions())
  blockwise_arg = linear_operator_util.arg_is_blockwise(
      block_dimensions, x, arg_dim)
  if blockwise_arg:
    split_x = x
  else:
    split_dim = -1 if adjoint_arg else -2
    # Split input by columns if adjoint_arg is True, else rows.
    # NOTE: domain dimensions are used for the split in both the adjoint and
    # non-adjoint case; the diagonal blocks are square, so the per-partition
    # domain and range sizes coincide.
    split_x = linear_operator_util.split_arg_into_blocks(
        self._block_domain_dimensions(),
        self._block_domain_dimension_tensors,
        x, axis=split_dim)

  result_list = []
  # Iterate over row-partitions (i.e. column-partitions of the adjoint).
  if adjoint:
    for index in range(len(self.operators)):
      # Begin with the operator on the diagonal and apply it to the
      # respective `rhs` block.
      result = self.operators[index][index].matmul(
          split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)

      # Iterate top to bottom over the operators in the remainder of the
      # column-partition (i.e. left to right over the row-partition of the
      # adjoint), apply the operator to the respective `rhs` block and
      # accumulate the sum. For example, given the
      # `LinearOperatorBlockLowerTriangular`:
      #
      # op = [[A, 0, 0],
      #       [B, C, 0],
      #       [D, E, F]]
      #
      # if `index = 1`, the following loop calculates:
      # `y_1 = (C.matmul(x_1, adjoint=adjoint) +
      #         E.matmul(x_2, adjoint=adjoint)`,
      # where `x_1` and `x_2` are splits of `x`.
      for j in range(index + 1, len(self.operators)):
        result += self.operators[j][index].matmul(
            split_x[j], adjoint=adjoint, adjoint_arg=adjoint_arg)
      result_list.append(result)
  else:
    for row in self.operators:
      # Begin with the left-most operator in the row-partition and apply it
      # to the first `rhs` block.
      result = row[0].matmul(
          split_x[0], adjoint=adjoint, adjoint_arg=adjoint_arg)
      # Iterate left to right over the operators in the remainder of the row
      # partition, apply the operator to the respective `rhs` block, and
      # accumulate the sum.
      for j, operator in enumerate(row[1:]):
        result += operator.matmul(
            split_x[j + 1], adjoint=adjoint, adjoint_arg=adjoint_arg)
      result_list.append(result)

  # Blockwise input gets a blockwise (list) output.
  if blockwise_arg:
    return result_list

  result_list = linear_operator_util.broadcast_matrix_batch_dims(
      result_list)
  return array_ops.concat(result_list, axis=-2)
def matvec(self, x, adjoint=False, name="matvec"):
  """Transform [batch] vector `x` with left multiplication: `x --> Ax`.

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)

  X = ... # shape [..., N], batch vector

  Y = operator.matvec(X)
  Y.shape
  ==> [..., M]

  Y[..., :] = sum_j A[..., :, j] X[..., j]
  ```

  Args:
    x: `Tensor` with compatible shape and same `dtype` as `self`, or an
      iterable of `Tensor`s. `Tensor`s are treated a [batch] vectors, meaning
      for every set of leading dimensions, the last dimension defines a
      vector.
      See class docstring for definition of compatibility.
    adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
    name: A name for this `Op`.

  Returns:
    A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
  """
  # Implemented by appending a trailing size-1 axis, delegating to `matmul`,
  # and squeezing the axis back out.
  with self._name_scope(name):  # pylint: disable=not-callable
    block_dimensions = (self._block_range_dimensions() if adjoint
                        else self._block_domain_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
      for i, block in enumerate(x):
        if not isinstance(block, linear_operator.LinearOperator):
          block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[-1])
          x[i] = block
      x_mat = [block[..., array_ops.newaxis] for block in x]
      y_mat = self.matmul(x_mat, adjoint=adjoint)
      return [array_ops.squeeze(y, axis=-1) for y in y_mat]

    x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
    self._check_input_dtype(x)
    op_dimension = (self.range_dimension if adjoint
                    else self.domain_dimension)
    op_dimension.assert_is_compatible_with(x.shape[-1])
    x_mat = x[..., array_ops.newaxis]
    y_mat = self.matmul(x_mat, adjoint=adjoint)
    return array_ops.squeeze(y_mat, axis=-1)
+ def _determinant(self):
672
+ if all(op.is_positive_definite for op in self._diagonal_operators):
673
+ return math_ops.exp(self._log_abs_determinant())
674
+ result = self._diagonal_operators[0].determinant()
675
+ for op in self._diagonal_operators[1:]:
676
+ result *= op.determinant()
677
+ return result
678
+
679
+ def _log_abs_determinant(self):
680
+ result = self._diagonal_operators[0].log_abs_determinant()
681
+ for op in self._diagonal_operators[1:]:
682
+ result += op.log_abs_determinant()
683
+ return result
684
+
685
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
  """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

  The returned `Tensor` will be close to an exact solution if `A` is well
  conditioned. Otherwise closeness will vary. See class docstring for details.

  Given the blockwise `n + 1`-by-`n + 1` linear operator:

  op = [[A_00     0  ...     0  ...    0],
        [A_10  A_11  ...     0  ...    0],
        ...
        [A_k0  A_k1  ...  A_kk  ...    0],
        ...
        [A_n0  A_n1  ...  A_nk  ... A_nn]]

  we find `x = op.solve(y)` by observing that

  `y_k = A_k0.matmul(x_0) + A_k1.matmul(x_1) + ... + A_kk.matmul(x_k)`

  and therefore

  `x_k = A_kk.solve(y_k -
                    A_k0.matmul(x_0) - ... - A_k(k-1).matmul(x_(k-1)))`

  where `x_k` and `y_k` are the `k`th blocks obtained by decomposing `x`
  and `y` along their appropriate axes.

  We first solve `x_0 = A_00.solve(y_0)`. Proceeding inductively, we solve
  for `x_k`, `k = 1..n`, given `x_0..x_(k-1)`.

  The adjoint case is solved similarly, beginning with
  `x_n = A_nn.solve(y_n, adjoint=True)` and proceeding backwards.

  Examples:

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  # Solve R > 0 linear systems for every member of the batch.
  RHS = ... # shape [..., M, R]

  X = operator.solve(RHS)
  # X[..., :, r] is the solution to the r'th linear system
  # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

  operator.matmul(X)
  ==> RHS
  ```

  Args:
    rhs: `Tensor` with same `dtype` as this operator and compatible shape,
      or a list of `Tensor`s. `Tensor`s are treated like a [batch] matrices
      meaning for every set of leading dimensions, the last two dimensions
      defines a matrix.
      See class docstring for definition of compatibility.
    adjoint: Python `bool`. If `True`, solve the system involving the adjoint
      of this `LinearOperator`: `A^H X = rhs`.
    adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
      is the hermitian transpose (transposition and complex conjugation).
    name: A name scope to use for ops added by this method.

  Returns:
    `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

  Raises:
    NotImplementedError: If `self.is_non_singular` or `is_square` is False.
  """
  if self.is_non_singular is False:
    raise NotImplementedError(
        "Exact solve not implemented for an operator that is expected to "
        "be singular.")
  if self.is_square is False:
    raise NotImplementedError(
        "Exact solve not implemented for an operator that is expected to "
        "not be square.")
  # Operator rhs: delegate to the operator-composition solve path.
  if isinstance(rhs, linear_operator.LinearOperator):
    left_operator = self.adjoint() if adjoint else self
    right_operator = rhs.adjoint() if adjoint_arg else rhs

    if (right_operator.range_dimension is not None and
        left_operator.domain_dimension is not None and
        right_operator.range_dimension != left_operator.domain_dimension):
      raise ValueError(
          "Operators are incompatible. Expected `rhs` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_solve(left_operator, right_operator)

  with self._name_scope(name):  # pylint: disable=not-callable
    block_dimensions = (self._block_domain_dimensions() if adjoint
                        else self._block_range_dimensions())
    arg_dim = -1 if adjoint_arg else -2
    blockwise_arg = linear_operator_util.arg_is_blockwise(
        block_dimensions, rhs, arg_dim)
    if blockwise_arg:
      # Blockwise rhs: validate/convert each block in place.
      for i, block in enumerate(rhs):
        if not isinstance(block, linear_operator.LinearOperator):
          block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
          rhs[i] = block
      if adjoint_arg:
        split_rhs = [linalg.adjoint(y) for y in rhs]
      else:
        split_rhs = rhs

    else:
      # Dense rhs: validate, optionally adjoint, then split into row blocks.
      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)
      op_dimension = (self.domain_dimension if adjoint
                      else self.range_dimension)
      op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])

      rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
      split_rhs = linear_operator_util.split_arg_into_blocks(
          self._block_domain_dimensions(),
          self._block_domain_dimension_tensors,
          rhs, axis=-2)

    solution_list = []
    if adjoint:
      # For an adjoint blockwise lower-triangular linear operator, the system
      # must be solved bottom to top. Iterate backwards over rows of the
      # adjoint (i.e. columns of the non-adjoint operator).
      for index in reversed(range(len(self.operators))):
        y = split_rhs[index]
        # Iterate top to bottom over the operators in the off-diagonal portion
        # of the column-partition (i.e. row-partition of the adjoint), apply
        # the operator to the respective block of the solution found in
        # previous iterations, and subtract the result from the `rhs` block.
        # For example,let `A`, `B`, and `D` be the linear operators in the top
        # row-partition of the adjoint of
        # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`,
        # and `x_1` and `x_2` be blocks of the solution found in previous
        # iterations of the outer loop. The following loop (when `index == 0`)
        # expresses
        # `Ax_0 + Bx_1 + Dx_2 = y_0` as `Ax_0 = y_0*`, where
        # `y_0* = y_0 - Bx_1 - Dx_2`.
        for j in reversed(range(index + 1, len(self.operators))):
          # `solution_list` is built bottom-up, hence the reversed index.
          y = y - self.operators[j][index].matmul(
              solution_list[len(self.operators) - 1 - j],
              adjoint=adjoint)
        # Continuing the example above, solve `Ax_0 = y_0*` for `x_0`.
        solution_list.append(
            self._diagonal_operators[index].solve(y, adjoint=adjoint))
      solution_list.reverse()
    else:
      # Iterate top to bottom over the row-partitions.
      for row, y in zip(self.operators, split_rhs):
        # Iterate left to right over the operators in the off-diagonal portion
        # of the row-partition, apply the operator to the block of the
        # solution found in previous iterations, and subtract the result from
        # the `rhs` block. For example, let `D`, `E`, and `F` be the linear
        # operators in the bottom row-partition of
        # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])` and
        # `x_0` and `x_1` be blocks of the solution found in previous
        # iterations of the outer loop. The following loop
        # (when `index == 2`), expresses
        # `Dx_0 + Ex_1 + Fx_2 = y_2` as `Fx_2 = y_2*`, where
        # `y_2* = y_2 - D_x0 - Ex_1`.
        for i, operator in enumerate(row[:-1]):
          y = y - operator.matmul(solution_list[i], adjoint=adjoint)
        # Continuing the example above, solve `Fx_2 = y_2*` for `x_2`.
        solution_list.append(row[-1].solve(y, adjoint=adjoint))

    if blockwise_arg:
      return solution_list

    solution_list = linear_operator_util.broadcast_matrix_batch_dims(
        solution_list)
    return array_ops.concat(solution_list, axis=-2)
def solvevec(self, rhs, adjoint=False, name="solve"):
  """Solve single equation with best effort: `A X = rhs`.

  The returned `Tensor` will be close to an exact solution if `A` is well
  conditioned. Otherwise closeness will vary. See class docstring for details.

  Examples:

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  # Solve one linear system for every member of the batch.
  RHS = ... # shape [..., M]

  X = operator.solvevec(RHS)
  # X is the solution to the linear system
  # sum_j A[..., :, j] X[..., j] = RHS[..., :]

  operator.matvec(X)
  ==> RHS
  ```

  Args:
    rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s
      (for blockwise operators). `Tensor`s are treated as [batch] vectors,
      meaning for every set of leading dimensions, the last dimension defines
      a vector. See class docstring for definition of compatibility regarding
      batch dimensions.
    adjoint: Python `bool`. If `True`, solve the system involving the adjoint
      of this `LinearOperator`: `A^H X = rhs`.
    name: A name scope to use for ops added by this method.

  Returns:
    `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.

  Raises:
    NotImplementedError: If `self.is_non_singular` or `is_square` is False.
  """
  # Implemented by appending a trailing size-1 axis, delegating to `solve`,
  # and squeezing the axis back out.
  with self._name_scope(name):  # pylint: disable=not-callable
    block_dimensions = (self._block_domain_dimensions() if adjoint
                        else self._block_range_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):
      for i, block in enumerate(rhs):
        if not isinstance(block, linear_operator.LinearOperator):
          block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[-1])
          rhs[i] = block
      rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]
      solution_mat = self.solve(rhs_mat, adjoint=adjoint)
      return [array_ops.squeeze(x, axis=-1) for x in solution_mat]
    rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        rhs, name="rhs"
    )
    self._check_input_dtype(rhs)
    op_dimension = (self.domain_dimension if adjoint
                    else self.range_dimension)
    op_dimension.assert_is_compatible_with(rhs.shape[-1])
    rhs_mat = array_ops.expand_dims(rhs, axis=-1)
    solution_mat = self.solve(rhs_mat, adjoint=adjoint)
    return array_ops.squeeze(solution_mat, axis=-1)
def _diag_part(self):
  """Concatenate the diagonal blocks' diagonals into one [batch] vector."""
  # Append a trailing axis first: `broadcast_matrix_batch_dims` treats all
  # but the final two dimensions as batch dimensions.
  columns = [op.diag_part()[..., array_ops.newaxis]
             for op in self._diagonal_operators]
  columns = linear_operator_util.broadcast_matrix_batch_dims(columns)
  stacked = array_ops.concat(columns, axis=-2)
  return array_ops.squeeze(stacked, axis=-1)
+ def _trace(self):
937
+ result = self._diagonal_operators[0].trace()
938
+ for op in self._diagonal_operators[1:]:
939
+ result += op.trace()
940
+ return result
941
+
942
def _to_dense(self):
  """Concatenate all blocks (plus upper-triangular zero padding) into a dense matrix."""
  num_cols = 0
  dense_rows = []
  flat_broadcast_operators = linear_operator_util.broadcast_matrix_batch_dims(
      [op.to_dense() for row in self.operators for op in row])  # pylint: disable=g-complex-comprehension
  # Row `i` holds `i + 1` blocks, so in the flattened list row `i` occupies
  # the slice [i*(i+1)/2, (i+1)*(i+2)/2) (triangular-number offsets).
  broadcast_operators = [
      flat_broadcast_operators[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
      for i in range(len(self.operators))]
  for row_blocks in broadcast_operators:
    batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
    # Running column count; becomes a tensor after the first iteration.
    num_cols += array_ops.shape(row_blocks[-1])[-1]
    # Zero-fill the strictly-upper-triangular remainder of this block row.
    zeros_to_pad_after_shape = array_ops.concat(
        [batch_row_shape,
         [self.domain_dimension_tensor() - num_cols]], axis=-1)
    zeros_to_pad_after = array_ops.zeros(
        shape=zeros_to_pad_after_shape, dtype=self.dtype)

    row_blocks.append(zeros_to_pad_after)
    dense_rows.append(array_ops.concat(row_blocks, axis=-1))

  mat = array_ops.concat(dense_rows, axis=-2)
  mat.set_shape(self.shape)
  return mat
def _assert_non_singular(self):
  """Group the diagonal blocks' non-singularity assertions into a single op."""
  checks = [op.assert_non_singular() for op in self._diagonal_operators]
  return control_flow_ops.group(checks)
def _eigvals(self):
  """Concatenate the diagonal blocks' eigenvalues.

  The eigenvalues of a block lower-triangular matrix are exactly the
  eigenvalues of its diagonal blocks.
  """
  # Append a trailing axis so `broadcast_matrix_batch_dims` treats the
  # eigenvalue vectors as matrices when broadcasting batch dimensions.
  eig_columns = [op.eigvals()[..., array_ops.newaxis]
                 for op in self._diagonal_operators]
  eig_columns = linear_operator_util.broadcast_matrix_batch_dims(eig_columns)
  stacked = array_ops.concat(eig_columns, axis=-2)
  return array_ops.squeeze(stacked, axis=-1)
@property
def _composite_tensor_fields(self):
  # Only `operators` is needed to reconstruct this operator as a
  # composite tensor.
  return ("operators",)
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
  # None of the operators contribute to the matrix shape.
  # Map every entry of the (nested) `operators` structure to 0.
  return {"operators": nest.map_structure(lambda _: 0, self.operators)}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_circulant.py ADDED
@@ -0,0 +1,1551 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`LinearOperator` coming from a [[nested] block] circulant matrix."""
16
+
17
+ import numpy as np
18
+
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.framework import ops
21
+ from tensorflow.python.framework import tensor
22
+ from tensorflow.python.framework import tensor_conversion
23
+ from tensorflow.python.framework import tensor_shape
24
+ from tensorflow.python.ops import array_ops
25
+ from tensorflow.python.ops import check_ops
26
+ from tensorflow.python.ops import math_ops
27
+ from tensorflow.python.ops.distributions import util as distribution_util
28
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
29
+ from tensorflow.python.ops.linalg import linear_operator
30
+ from tensorflow.python.ops.linalg import linear_operator_util
31
+ from tensorflow.python.ops.linalg import property_hint_util
32
+ from tensorflow.python.ops.signal import fft_ops
33
+ from tensorflow.python.util.tf_export import tf_export
34
+
35
+ __all__ = [
36
+ "LinearOperatorCirculant",
37
+ "LinearOperatorCirculant2D",
38
+ "LinearOperatorCirculant3D",
39
+ ]
40
+
41
+ # Different FFT Ops will be used for different block depths.
42
+ _FFT_OP = {1: fft_ops.fft, 2: fft_ops.fft2d, 3: fft_ops.fft3d}
43
+ _IFFT_OP = {1: fft_ops.ifft, 2: fft_ops.ifft2d, 3: fft_ops.ifft3d}
44
+
45
+
46
+ def exponential_power_convolution_kernel(
47
+ grid_shape,
48
+ length_scale,
49
+ power=None,
50
+ divisor=None,
51
+ zero_inflation=None,
52
+ ):
53
+ """Make an exponentiated convolution kernel.
54
+
55
+ In signal processing, a [kernel]
56
+ (https://en.wikipedia.org/wiki/Kernel_(image_processing)) `h` can be convolved
57
+ with a signal `x` to filter its spectral content.
58
+
59
+ This function makes a `d-dimensional` convolution kernel `h` of shape
60
+ `grid_shape = [N0, N1, ...]`. For `n` a multi-index with `n[i] < Ni / 2`,
61
+
62
+ ```h[n] = exp{sum(|n / (length_scale * grid_shape)|**power) / divisor}.```
63
+
64
+ For other `n`, `h` is extended to be circularly symmetric. That is
65
+
66
+ ```h[n0 % N0, ...] = h[(-n0) % N0, ...]```
67
+
68
+ Since `h` is circularly symmetric and real valued, `H = FFTd[h]` is the
69
+ spectrum of a symmetric (real) circulant operator `A`.
70
+
71
+ #### Example uses
72
+
73
+ ```
74
+ # Matern one-half kernel, d=1.
75
+ # Will be positive definite without zero_inflation.
76
+ h = exponential_power_convolution_kernel(
77
+ grid_shape=[10], length_scale=[0.1], power=1)
78
+ A = LinearOperatorCirculant(
79
+ tf.signal.fft(tf.cast(h, tf.complex64)),
80
+ is_self_adjoint=True, is_positive_definite=True)
81
+
82
+ # Gaussian RBF kernel, d=3.
83
+ # Needs zero_inflation since `length_scale` is long enough to cause aliasing.
84
+ h = exponential_power_convolution_kernel(
85
+ grid_shape=[10, 10, 10], length_scale=[0.1, 0.2, 0.2], power=2,
86
+ zero_inflation=0.15)
87
+ A = LinearOperatorCirculant3D(
88
+ tf.signal.fft3d(tf.cast(h, tf.complex64)),
89
+ is_self_adjoint=True, is_positive_definite=True)
90
+ ```
91
+
92
+ Args:
93
+ grid_shape: Length `d` (`d` in {1, 2, 3}) list-like of Python integers. The
94
+ shape of the grid on which the convolution kernel is defined.
95
+ length_scale: Length `d` `float` `Tensor`. The scale at which the kernel
96
+ decays in each direction, as a fraction of `grid_shape`.
97
+ power: Scalar `Tensor` of same `dtype` as `length_scale`, default `2`.
98
+ Higher (lower) `power` results in nearby points being more (less)
99
+ correlated, and far away points being less (more) correlated.
100
+ divisor: Scalar `Tensor` of same `dtype` as `length_scale`. The slope of
101
+ decay of `log(kernel)` in terms of fractional grid points, along each
102
+ axis, at `length_scale`, is `power/divisor`. By default, `divisor` is set
103
+ to `power`. This means, by default, `power=2` results in an exponentiated
104
+ quadratic (Gaussian) kernel, and `power=1` is a Matern one-half.
105
+ zero_inflation: Scalar `Tensor` of same `dtype` as `length_scale`, in
106
+ `[0, 1]`. Let `delta` be the Kronecker delta. That is,
107
+ `delta[0, ..., 0] = 1` and all other entries are `0`. Then
108
+ `zero_inflation` modifies the return value via
109
+ `h --> (1 - zero_inflation) * h + zero_inflation * delta`. This may be
110
+ needed to ensure a positive definite kernel, especially if `length_scale`
111
+ is large enough for aliasing and `power > 1`.
112
+
113
+ Returns:
114
+ `Tensor` of shape `grid_shape` with same `dtype` as `length_scale`.
115
+ """
116
+ nd = len(grid_shape)
117
+
118
+ length_scale = tensor_conversion.convert_to_tensor_v2_with_dispatch(
119
+ length_scale, name="length_scale"
120
+ )
121
+ dtype = length_scale.dtype
122
+
123
+ power = 2. if power is None else power
124
+ power = tensor_conversion.convert_to_tensor_v2_with_dispatch(
125
+ power, name="power", dtype=dtype
126
+ )
127
+ divisor = power if divisor is None else divisor
128
+ divisor = tensor_conversion.convert_to_tensor_v2_with_dispatch(
129
+ divisor, name="divisor", dtype=dtype
130
+ )
131
+
132
+ # With K = grid_shape[i], we implicitly assume the grid vertices along the
133
+ # ith dimension are at:
134
+ # 0 = 0 / (K - 1), 1 / (K - 1), 2 / (K - 1), ..., (K - 1) / (K - 1) = 1.
135
+ zero = math_ops.cast(0., dtype)
136
+ one = math_ops.cast(1., dtype)
137
+ ts = [math_ops.linspace(zero, one, num=n) for n in grid_shape]
138
+
139
+ log_vals = []
140
+ for i, x in enumerate(array_ops.meshgrid(*ts, indexing="ij")):
141
+ # midpoint[i] is the vertex just to the left of 1 / 2.
142
+ # ifftshift will shift this vertex to position 0.
143
+ midpoint = ts[i][math_ops.cast(
144
+ math_ops.floor(one / 2. * grid_shape[i]), dtypes.int32)]
145
+ log_vals.append(-(math_ops.abs(
146
+ (x - midpoint) / length_scale[i]))**power / divisor)
147
+ kernel = math_ops.exp(
148
+ fft_ops.ifftshift(sum(log_vals), axes=[-i for i in range(1, nd + 1)]))
149
+
150
+ if zero_inflation:
151
+ # delta.shape = grid_shape, delta[0, 0, 0] = 1., all other entries are 0.
152
+ zero_inflation = tensor_conversion.convert_to_tensor_v2_with_dispatch(
153
+ zero_inflation, name="zero_inflation", dtype=dtype
154
+ )
155
+ delta = array_ops.pad(
156
+ array_ops.reshape(one, [1] * nd), [[0, dim - 1] for dim in grid_shape])
157
+ kernel = (1. - zero_inflation) * kernel + zero_inflation * delta
158
+
159
+ return kernel
160
+
161
+
162
+ # TODO(langmore) Add transformations that create common spectrums, e.g.
163
+ # starting with the convolution kernel
164
+ # start with half a spectrum, and create a Hermitian one.
165
+ # common filters.
166
+ # TODO(langmore) Support rectangular Toeplitz matrices.
167
+ class _BaseLinearOperatorCirculant(linear_operator.LinearOperator):
168
+ """Base class for circulant operators. Not user facing.
169
+
170
+ `LinearOperator` acting like a [batch] [[nested] block] circulant matrix.
171
+ """
172
+
173
+ def __init__(self,
174
+ spectrum: tensor.Tensor,
175
+ block_depth: int,
176
+ input_output_dtype=dtypes.complex64,
177
+ is_non_singular: bool = None,
178
+ is_self_adjoint: bool = None,
179
+ is_positive_definite: bool = None,
180
+ is_square: bool = True,
181
+ parameters=None,
182
+ name="LinearOperatorCirculant"):
183
+ r"""Initialize an `_BaseLinearOperatorCirculant`.
184
+
185
+ Args:
186
+ spectrum: Shape `[B1,...,Bb] + N` `Tensor`, where `rank(N) in {1, 2, 3}`.
187
+ Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
188
+ `complex128`. Type can be different than `input_output_dtype`
189
+ block_depth: Python integer, either 1, 2, or 3. Will be 1 for circulant,
190
+ 2 for block circulant, and 3 for nested block circulant.
191
+ input_output_dtype: `dtype` for input/output.
192
+ is_non_singular: Expect that this operator is non-singular.
193
+ is_self_adjoint: Expect that this operator is equal to its hermitian
194
+ transpose. If `spectrum` is real, this will always be true.
195
+ is_positive_definite: Expect that this operator is positive definite,
196
+ meaning the quadratic form `x^H A x` has positive real part for all
197
+ nonzero `x`. Note that we do not require the operator to be
198
+ self-adjoint to be positive-definite. See:
199
+ https://en.wikipedia.org/wiki/Positive-definite_matrix\
200
+ #Extension_for_non_symmetric_matrices
201
+ is_square: Expect that this operator acts like square [batch] matrices.
202
+ parameters: Python `dict` of parameters used to instantiate this
203
+ `LinearOperator`.
204
+ name: A name to prepend to all ops created by this class.
205
+
206
+ Raises:
207
+ ValueError: If `block_depth` is not an allowed value.
208
+ TypeError: If `spectrum` is not an allowed type.
209
+ """
210
+
211
+ allowed_block_depths = [1, 2, 3]
212
+
213
+ self._name = name
214
+
215
+ if block_depth not in allowed_block_depths:
216
+ raise ValueError(
217
+ f"Argument `block_depth` must be one of {allowed_block_depths}. "
218
+ f"Received: {block_depth}.")
219
+ self._block_depth = block_depth
220
+
221
+ with ops.name_scope(name, values=[spectrum]):
222
+ self._spectrum = self._check_spectrum_and_return_tensor(spectrum)
223
+
224
+ # Check and auto-set hints.
225
+ if not self.spectrum.dtype.is_complex:
226
+ if is_self_adjoint is False:
227
+ raise ValueError(
228
+ f"A real spectrum always corresponds to a self-adjoint operator. "
229
+ f"Expected argument `is_self_adjoint` to be True when "
230
+ f"`spectrum.dtype.is_complex` = True. "
231
+ f"Received: {is_self_adjoint}.")
232
+ is_self_adjoint = True
233
+
234
+ if is_square is False:
235
+ raise ValueError(
236
+ f"A [[nested] block] circulant operator is always square. "
237
+ f"Expected argument `is_square` to be True. Received: {is_square}.")
238
+ is_square = True
239
+
240
+ super(_BaseLinearOperatorCirculant, self).__init__(
241
+ dtype=dtypes.as_dtype(input_output_dtype),
242
+ is_non_singular=is_non_singular,
243
+ is_self_adjoint=is_self_adjoint,
244
+ is_positive_definite=is_positive_definite,
245
+ is_square=is_square,
246
+ parameters=parameters,
247
+ name=name)
248
+
249
+ def _check_spectrum_and_return_tensor(self, spectrum):
250
+ """Static check of spectrum. Then return `Tensor` version."""
251
+ spectrum = linear_operator_util.convert_nonref_to_tensor(spectrum,
252
+ name="spectrum")
253
+
254
+ if spectrum.shape.ndims is not None:
255
+ if spectrum.shape.ndims < self.block_depth:
256
+ raise ValueError(
257
+ f"Argument `spectrum` must have at least {self.block_depth} "
258
+ f"dimensions. Received: {spectrum}.")
259
+ return spectrum
260
+
261
+ @property
262
+ def block_depth(self):
263
+ """Depth of recursively defined circulant blocks defining this `Operator`.
264
+
265
+ With `A` the dense representation of this `Operator`,
266
+
267
+ `block_depth = 1` means `A` is symmetric circulant. For example,
268
+
269
+ ```
270
+ A = |w z y x|
271
+ |x w z y|
272
+ |y x w z|
273
+ |z y x w|
274
+ ```
275
+
276
+ `block_depth = 2` means `A` is block symmetric circulant with symmetric
277
+ circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant,
278
+
279
+ ```
280
+ A = |W Z Y X|
281
+ |X W Z Y|
282
+ |Y X W Z|
283
+ |Z Y X W|
284
+ ```
285
+
286
+ `block_depth = 3` means `A` is block symmetric circulant with block
287
+ symmetric circulant blocks.
288
+
289
+ Returns:
290
+ Python `integer`.
291
+ """
292
+ return self._block_depth
293
+
294
+ def block_shape_tensor(self):
295
+ """Shape of the block dimensions of `self.spectrum`."""
296
+ # If spectrum.shape = [s0, s1, s2], and block_depth = 2,
297
+ # block_shape = [s1, s2]
298
+ return self._block_shape_tensor()
299
+
300
+ def _block_shape_tensor(self, spectrum_shape=None):
301
+ if self.block_shape.is_fully_defined():
302
+ return linear_operator_util.shape_tensor(
303
+ self.block_shape.as_list(), name="block_shape")
304
+ spectrum_shape = (
305
+ array_ops.shape(self.spectrum)
306
+ if spectrum_shape is None else spectrum_shape)
307
+ return spectrum_shape[-self.block_depth:]
308
+
309
+ def _linop_adjoint(self) -> "_BaseLinearOperatorCirculant":
310
+ spectrum = self.spectrum
311
+ if spectrum.dtype.is_complex:
312
+ spectrum = math_ops.conj(spectrum)
313
+
314
+ # Conjugating the spectrum is sufficient to get the adjoint.
315
+ return _BaseLinearOperatorCirculant(
316
+ spectrum=spectrum,
317
+ block_depth=self.block_depth,
318
+ is_non_singular=self.is_non_singular,
319
+ is_self_adjoint=self.is_self_adjoint,
320
+ is_positive_definite=self.is_positive_definite,
321
+ is_square=True)
322
+
323
+ def _linop_inverse(self) -> "_BaseLinearOperatorCirculant":
324
+ return _BaseLinearOperatorCirculant(
325
+ spectrum=1. / self.spectrum,
326
+ block_depth=self.block_depth,
327
+ is_non_singular=self.is_non_singular,
328
+ is_self_adjoint=self.is_self_adjoint,
329
+ is_positive_definite=self.is_positive_definite,
330
+ is_square=True,
331
+ input_output_dtype=self.dtype)
332
+
333
+ def _linop_matmul(
334
+ self,
335
+ left_operator: "_BaseLinearOperatorCirculant",
336
+ right_operator: linear_operator.LinearOperator,
337
+ ) -> linear_operator.LinearOperator:
338
+ if (not isinstance(right_operator, _BaseLinearOperatorCirculant)
339
+ or not isinstance(left_operator, type(right_operator))):
340
+ return super()._linop_matmul(left_operator, right_operator)
341
+
342
+ return _BaseLinearOperatorCirculant(
343
+ spectrum=left_operator.spectrum * right_operator.spectrum,
344
+ block_depth=left_operator.block_depth,
345
+ is_non_singular=property_hint_util.combined_non_singular_hint(
346
+ left_operator, right_operator),
347
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
348
+ left_operator, right_operator),
349
+ is_positive_definite=(
350
+ property_hint_util.combined_commuting_positive_definite_hint(
351
+ left_operator, right_operator)),
352
+ is_square=True)
353
+
354
+ def _linop_solve(
355
+ self,
356
+ left_operator: "_BaseLinearOperatorCirculant",
357
+ right_operator: linear_operator.LinearOperator,
358
+ ) -> linear_operator.LinearOperator:
359
+ if (not isinstance(right_operator, _BaseLinearOperatorCirculant)
360
+ or not isinstance(left_operator, type(right_operator))):
361
+ return super()._linop_solve(left_operator, right_operator)
362
+
363
+ return _BaseLinearOperatorCirculant(
364
+ spectrum=right_operator.spectrum / left_operator.spectrum,
365
+ block_depth=left_operator.block_depth,
366
+ is_non_singular=property_hint_util.combined_non_singular_hint(
367
+ left_operator, right_operator),
368
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
369
+ left_operator, right_operator),
370
+ is_positive_definite=(
371
+ property_hint_util.combined_commuting_positive_definite_hint(
372
+ left_operator, right_operator)),
373
+ is_square=True)
374
+
375
+ @property
376
+ def block_shape(self):
377
+ return self.spectrum.shape[-self.block_depth:]
378
+
379
+ @property
380
+ def spectrum(self) -> tensor.Tensor:
381
+ return self._spectrum
382
+
383
+ def _vectorize_then_blockify(self, matrix):
384
+ """Shape batch matrix to batch vector, then blockify trailing dimensions."""
385
+ # Suppose
386
+ # matrix.shape = [m0, m1, m2, m3],
387
+ # and matrix is a matrix because the final two dimensions are matrix dims.
388
+ # self.block_depth = 2,
389
+ # self.block_shape = [b0, b1] (note b0 * b1 = m2).
390
+ # We will reshape matrix to
391
+ # [m3, m0, m1, b0, b1].
392
+
393
+ # Vectorize: Reshape to batch vector.
394
+ # [m0, m1, m2, m3] --> [m3, m0, m1, m2]
395
+ # This is called "vectorize" because we have taken the final two matrix dims
396
+ # and turned this into a size m3 batch of vectors.
397
+ vec = distribution_util.rotate_transpose(matrix, shift=1)
398
+
399
+ # Blockify: Blockfy trailing dimensions.
400
+ # [m3, m0, m1, m2] --> [m3, m0, m1, b0, b1]
401
+ if (vec.shape.is_fully_defined() and
402
+ self.block_shape.is_fully_defined()):
403
+ # vec_leading_shape = [m3, m0, m1],
404
+ # the parts of vec that will not be blockified.
405
+ vec_leading_shape = vec.shape[:-1]
406
+ final_shape = vec_leading_shape.concatenate(self.block_shape)
407
+ else:
408
+ vec_leading_shape = array_ops.shape(vec)[:-1]
409
+ final_shape = array_ops.concat(
410
+ (vec_leading_shape, self.block_shape_tensor()), 0)
411
+ return array_ops.reshape(vec, final_shape)
412
+
413
+ def _unblockify(self, x):
414
+ """Flatten the trailing block dimensions."""
415
+ # Suppose
416
+ # x.shape = [v0, v1, v2, v3],
417
+ # self.block_depth = 2.
418
+ # Then
419
+ # leading shape = [v0, v1]
420
+ # block shape = [v2, v3].
421
+ # We will reshape x to
422
+ # [v0, v1, v2*v3].
423
+ if x.shape.is_fully_defined():
424
+ # x_shape = [v0, v1, v2, v3]
425
+ x_shape = x.shape.as_list()
426
+ # x_leading_shape = [v0, v1]
427
+ x_leading_shape = x_shape[:-self.block_depth]
428
+ # x_block_shape = [v2, v3]
429
+ x_block_shape = x_shape[-self.block_depth:]
430
+ # flat_shape = [v0, v1, v2*v3]
431
+ flat_shape = x_leading_shape + [np.prod(x_block_shape)]
432
+ else:
433
+ x_shape = array_ops.shape(x)
434
+ x_leading_shape = x_shape[:-self.block_depth]
435
+ x_block_shape = x_shape[-self.block_depth:]
436
+ flat_shape = array_ops.concat(
437
+ (x_leading_shape, [math_ops.reduce_prod(x_block_shape)]), 0)
438
+ return array_ops.reshape(x, flat_shape)
439
+
440
+ def _unblockify_then_matricize(self, vec):
441
+ """Flatten the block dimensions then reshape to a batch matrix."""
442
+ # Suppose
443
+ # vec.shape = [v0, v1, v2, v3],
444
+ # self.block_depth = 2.
445
+ # Then
446
+ # leading shape = [v0, v1]
447
+ # block shape = [v2, v3].
448
+ # We will reshape vec to
449
+ # [v1, v2*v3, v0].
450
+
451
+ # Un-blockify: Flatten block dimensions. Reshape
452
+ # [v0, v1, v2, v3] --> [v0, v1, v2*v3].
453
+ vec_flat = self._unblockify(vec)
454
+
455
+ # Matricize: Reshape to batch matrix.
456
+ # [v0, v1, v2*v3] --> [v1, v2*v3, v0],
457
+ # representing a shape [v1] batch of [v2*v3, v0] matrices.
458
+ matrix = distribution_util.rotate_transpose(vec_flat, shift=-1)
459
+ return matrix
460
+
461
+ def _fft(self, x):
462
+ """FFT along the last self.block_depth dimensions of x.
463
+
464
+ Args:
465
+ x: `Tensor` with floating or complex `dtype`.
466
+ Should be in the form returned by self._vectorize_then_blockify.
467
+
468
+ Returns:
469
+ `Tensor` with `dtype` `complex64`.
470
+ """
471
+ x_complex = _to_complex(x)
472
+ return _FFT_OP[self.block_depth](x_complex)
473
+
474
+ def _ifft(self, x):
475
+ """IFFT along the last self.block_depth dimensions of x.
476
+
477
+ Args:
478
+ x: `Tensor` with floating or complex dtype. Should be in the form
479
+ returned by self._vectorize_then_blockify.
480
+
481
+ Returns:
482
+ `Tensor` with `dtype` `complex64`.
483
+ """
484
+ x_complex = _to_complex(x)
485
+ return _IFFT_OP[self.block_depth](x_complex)
486
+
487
+ def convolution_kernel(self, name="convolution_kernel"):
488
+ """Convolution kernel corresponding to `self.spectrum`.
489
+
490
+ The `D` dimensional DFT of this kernel is the frequency domain spectrum of
491
+ this operator.
492
+
493
+ Args:
494
+ name: A name to give this `Op`.
495
+
496
+ Returns:
497
+ `Tensor` with `dtype` `self.dtype`.
498
+ """
499
+ with self._name_scope(name): # pylint: disable=not-callable
500
+ h = self._ifft(_to_complex(self.spectrum))
501
+ return math_ops.cast(h, self.dtype)
502
+
503
+ def _shape(self):
504
+ s_shape = self._spectrum.shape
505
+ # Suppose spectrum.shape = [a, b, c, d]
506
+ # block_depth = 2
507
+ # Then:
508
+ # batch_shape = [a, b]
509
+ # N = c*d
510
+ # and we want to return
511
+ # [a, b, c*d, c*d]
512
+ batch_shape = s_shape[:-self.block_depth]
513
+ # trailing_dims = [c, d]
514
+ trailing_dims = s_shape[-self.block_depth:]
515
+ if trailing_dims.is_fully_defined():
516
+ n = np.prod(trailing_dims.as_list())
517
+ else:
518
+ n = None
519
+ n_x_n = tensor_shape.TensorShape([n, n])
520
+ return batch_shape.concatenate(n_x_n)
521
+
522
+ def _shape_tensor(self, spectrum=None):
523
+ spectrum = self.spectrum if spectrum is None else spectrum
524
+ # See self.shape for explanation of steps
525
+ s_shape = array_ops.shape(spectrum)
526
+ batch_shape = s_shape[:-self.block_depth]
527
+ trailing_dims = s_shape[-self.block_depth:]
528
+ n = math_ops.reduce_prod(trailing_dims)
529
+ n_x_n = [n, n]
530
+ return array_ops.concat((batch_shape, n_x_n), 0)
531
+
532
+ def assert_hermitian_spectrum(self, name="assert_hermitian_spectrum"):
533
+ """Returns an `Op` that asserts this operator has Hermitian spectrum.
534
+
535
+ This operator corresponds to a real-valued matrix if and only if its
536
+ spectrum is Hermitian.
537
+
538
+ Args:
539
+ name: A name to give this `Op`.
540
+
541
+ Returns:
542
+ An `Op` that asserts this operator has Hermitian spectrum.
543
+ """
544
+ eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps
545
+ with self._name_scope(name): # pylint: disable=not-callable
546
+ # Assume linear accumulation of error.
547
+ max_err = eps * self.domain_dimension_tensor()
548
+ imag_convolution_kernel = math_ops.imag(self.convolution_kernel())
549
+ return check_ops.assert_less(
550
+ math_ops.abs(imag_convolution_kernel),
551
+ max_err,
552
+ message="Spectrum was not Hermitian")
553
+
554
+ def _assert_non_singular(self):
555
+ return linear_operator_util.assert_no_entries_with_modulus_zero(
556
+ self.spectrum,
557
+ message="Singular operator: Spectrum contained zero values.")
558
+
559
+ def _assert_positive_definite(self):
560
+ # This operator has the action Ax = F^H D F x,
561
+ # where D is the diagonal matrix with self.spectrum on the diag. Therefore,
562
+ # <x, Ax> = <Fx, DFx>,
563
+ # Since F is bijective, the condition for positive definite is the same as
564
+ # for a diagonal matrix, i.e. real part of spectrum is positive.
565
+ message = (
566
+ "Not positive definite: Real part of spectrum was not all positive.")
567
+ return check_ops.assert_positive(
568
+ math_ops.real(self.spectrum), message=message)
569
+
570
+ def _assert_self_adjoint(self):
571
+ # Recall correspondence between symmetry and real transforms. See docstring
572
+ return linear_operator_util.assert_zero_imag_part(
573
+ self.spectrum,
574
+ message=(
575
+ "Not self-adjoint: The spectrum contained non-zero imaginary part."
576
+ ))
577
+
578
+ def _broadcast_batch_dims(self, x, spectrum):
579
+ """Broadcast batch dims of batch matrix `x` and spectrum."""
580
+ spectrum = tensor_conversion.convert_to_tensor_v2_with_dispatch(
581
+ spectrum, name="spectrum"
582
+ )
583
+ # spectrum.shape = batch_shape + block_shape
584
+ # First make spectrum a batch matrix with
585
+ # spectrum.shape = batch_shape + [prod(block_shape), 1]
586
+ batch_shape = self._batch_shape_tensor(
587
+ shape=self._shape_tensor(spectrum=spectrum))
588
+ spec_mat = array_ops.reshape(
589
+ spectrum, array_ops.concat((batch_shape, [-1, 1]), axis=0))
590
+ # Second, broadcast, possibly requiring an addition of array of zeros.
591
+ x, spec_mat = linear_operator_util.broadcast_matrix_batch_dims((x,
592
+ spec_mat))
593
+ # Third, put the block shape back into spectrum.
594
+ x_batch_shape = array_ops.shape(x)[:-2]
595
+ spectrum_shape = array_ops.shape(spectrum)
596
+ spectrum = array_ops.reshape(
597
+ spec_mat,
598
+ array_ops.concat(
599
+ (x_batch_shape,
600
+ self._block_shape_tensor(spectrum_shape=spectrum_shape)),
601
+ axis=0))
602
+
603
+ return x, spectrum
604
+
605
+ def _cond(self):
606
+ # Regardless of whether the operator is real, it is always diagonalizable by
607
+ # the Fourier basis F. I.e. A = F S F^H, with S a diagonal matrix
608
+ # containing the spectrum. We then have:
609
+ # A A^H = F SS^H F^H = F K F^H,
610
+ # where K = diag with squared absolute values of the spectrum.
611
+ # So in all cases,
612
+ abs_singular_values = math_ops.abs(self._unblockify(self.spectrum))
613
+ return (math_ops.reduce_max(abs_singular_values, axis=-1) /
614
+ math_ops.reduce_min(abs_singular_values, axis=-1))
615
+
616
+ def _eigvals(self):
617
+ return tensor_conversion.convert_to_tensor_v2_with_dispatch(
618
+ self._unblockify(self.spectrum)
619
+ )
620
+
621
+ def _matmul(self, x, adjoint=False, adjoint_arg=False):
622
+ x = linalg.adjoint(x) if adjoint_arg else x
623
+ # With F the matrix of a DFT, and F^{-1}, F^H the inverse and Hermitian
624
+ # transpose, one can show that F^{-1} = F^{H} is the IDFT matrix. Therefore
625
+ # matmul(x) = F^{-1} diag(spectrum) F x,
626
+ # = F^{H} diag(spectrum) F x,
627
+ # so that
628
+ # matmul(x, adjoint=True) = F^{H} diag(conj(spectrum)) F x.
629
+ spectrum = _to_complex(self.spectrum)
630
+ if adjoint:
631
+ spectrum = math_ops.conj(spectrum)
632
+
633
+ x = math_ops.cast(x, spectrum.dtype)
634
+
635
+ x, spectrum = self._broadcast_batch_dims(x, spectrum)
636
+
637
+ x_vb = self._vectorize_then_blockify(x)
638
+ fft_x_vb = self._fft(x_vb)
639
+ block_vector_result = self._ifft(spectrum * fft_x_vb)
640
+ y = self._unblockify_then_matricize(block_vector_result)
641
+
642
+ return math_ops.cast(y, self.dtype)
643
+
644
+ def _determinant(self):
645
+ axis = [-(i + 1) for i in range(self.block_depth)]
646
+ det = math_ops.reduce_prod(self.spectrum, axis=axis)
647
+ return math_ops.cast(det, self.dtype)
648
+
649
+ def _log_abs_determinant(self):
650
+ axis = [-(i + 1) for i in range(self.block_depth)]
651
+ lad = math_ops.reduce_sum(
652
+ math_ops.log(math_ops.abs(self.spectrum)), axis=axis)
653
+ return math_ops.cast(lad, self.dtype)
654
+
655
+ def _solve(self, rhs, adjoint=False, adjoint_arg=False):
656
+ rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
657
+ spectrum = _to_complex(self.spectrum)
658
+ if adjoint:
659
+ spectrum = math_ops.conj(spectrum)
660
+
661
+ rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum)
662
+
663
+ rhs_vb = self._vectorize_then_blockify(rhs)
664
+ fft_rhs_vb = self._fft(rhs_vb)
665
+ solution_vb = self._ifft(fft_rhs_vb / spectrum)
666
+ x = self._unblockify_then_matricize(solution_vb)
667
+ return math_ops.cast(x, self.dtype)
668
+
669
+ def _diag_part(self):
670
+ # Get ones in shape of diag, which is [B1,...,Bb, N]
671
+ # Also get the size of the diag, "N".
672
+ if self.shape.is_fully_defined():
673
+ diag_shape = self.shape[:-1]
674
+ diag_size = self.domain_dimension.value
675
+ else:
676
+ diag_shape = self.shape_tensor()[:-1]
677
+ diag_size = self.domain_dimension_tensor()
678
+ ones_diag = array_ops.ones(diag_shape, dtype=self.dtype)
679
+
680
+ # As proved in comments in self._trace, the value on the diag is constant,
681
+ # repeated N times. This value is the trace divided by N.
682
+
683
+ # The handling of self.shape = (0, 0) is tricky, and is the reason we choose
684
+ # to compute trace and use that to compute diag_part, rather than computing
685
+ # the value on the diagonal ("diag_value") directly. Both result in a 0/0,
686
+ # but in different places, and the current method gives the right result in
687
+ # the end.
688
+
689
+ # Here, if self.shape = (0, 0), then self.trace() = 0., and then
690
+ # diag_value = 0. / 0. = NaN.
691
+ diag_value = self.trace() / math_ops.cast(diag_size, self.dtype)
692
+
693
+ # If self.shape = (0, 0), then ones_diag = [] (empty tensor), and then
694
+ # the following line is NaN * [] = [], as needed.
695
+ return diag_value[..., array_ops.newaxis] * ones_diag
696
+
697
+ def _trace(self):
698
+ # The diagonal of the [[nested] block] circulant operator is the mean of
699
+ # the spectrum.
700
+ # Proof: For the [0,...,0] element, this follows from the IDFT formula.
701
+ # Then the result follows since all diagonal elements are the same.
702
+
703
+ # Therefore, the trace is the sum of the spectrum.
704
+
705
+ # Get shape of diag along with the axis over which to reduce the spectrum.
706
+ # We will reduce the spectrum over all block indices.
707
+ if self.spectrum.shape.is_fully_defined():
708
+ spec_rank = self.spectrum.shape.ndims
709
+ axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32)
710
+ else:
711
+ spec_rank = array_ops.rank(self.spectrum)
712
+ axis = math_ops.range(spec_rank - self.block_depth, spec_rank)
713
+
714
+ # Real diag part "re_d".
715
+ # Suppose spectrum.shape = [B1,...,Bb, N1, N2]
716
+ # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N.
717
+ # re_d_value.shape = [B1,...,Bb]
718
+ re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis)
719
+
720
+ if not self.dtype.is_complex:
721
+ return math_ops.cast(re_d_value, self.dtype)
722
+
723
+ # Imaginary part, "im_d".
724
+ if self.is_self_adjoint:
725
+ im_d_value = array_ops.zeros_like(re_d_value)
726
+ else:
727
+ im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis)
728
+
729
+ return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype)
730
+
731
+ @property
732
+ def _composite_tensor_fields(self):
733
+ return ("spectrum", "input_output_dtype")
734
+
735
+ @property
736
+ def _experimental_parameter_ndims_to_matrix_ndims(self):
737
+ return {"spectrum": self.block_depth}
738
+
739
+
740
+ @tf_export("linalg.LinearOperatorCirculant")
741
+ @linear_operator.make_composite_tensor
742
+ class LinearOperatorCirculant(_BaseLinearOperatorCirculant):
743
+ """`LinearOperator` acting like a circulant matrix.
744
+
745
+ This operator acts like a circulant matrix `A` with
746
+ shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
747
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
748
+ an `N x N` matrix. This matrix `A` is not materialized, but for
749
+ purposes of broadcasting this shape will be relevant.
750
+
751
+ #### Description in terms of circulant matrices
752
+
753
+ Circulant means the entries of `A` are generated by a single vector, the
754
+ convolution kernel `h`: `A_{mn} := h_{m-n mod N}`. With `h = [w, x, y, z]`,
755
+
756
+ ```
757
+ A = |w z y x|
758
+ |x w z y|
759
+ |y x w z|
760
+ |z y x w|
761
+ ```
762
+
763
+ This means that the result of matrix multiplication `v = Au` has `Lth` column
764
+ given circular convolution between `h` with the `Lth` column of `u`.
765
+
766
+ #### Description in terms of the frequency spectrum
767
+
768
+ There is an equivalent description in terms of the [batch] spectrum `H` and
769
+ Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
770
+ dimensions. Define the discrete Fourier transform (DFT) and its inverse by
771
+
772
+ ```
773
+ DFT[ h[n] ] = H[k] := sum_{n = 0}^{N - 1} h_n e^{-i 2pi k n / N}
774
+ IDFT[ H[k] ] = h[n] = N^{-1} sum_{k = 0}^{N - 1} H_k e^{i 2pi k n / N}
775
+ ```
776
+
777
+ From these definitions, we see that
778
+
779
+ ```
780
+ H[0] = sum_{n = 0}^{N - 1} h_n
781
+ H[1] = "the first positive frequency"
782
+ H[N - 1] = "the first negative frequency"
783
+ ```
784
+
785
+ Loosely speaking, with `*` element-wise multiplication, matrix multiplication
786
+ is equal to the action of a Fourier multiplier: `A u = IDFT[ H * DFT[u] ]`.
787
+ Precisely speaking, given `[N, R]` matrix `u`, let `DFT[u]` be the `[N, R]`
788
+ matrix with `rth` column equal to the DFT of the `rth` column of `u`.
789
+ Define the `IDFT` similarly.
790
+ Matrix multiplication may be expressed columnwise:
791
+
792
+ ```(A u)_r = IDFT[ H * (DFT[u])_r ]```
793
+
794
+ #### Operator properties deduced from the spectrum.
795
+
796
+ Letting `U` be the `kth` Euclidean basis vector, and `U = IDFT[u]`.
797
+ The above formulas show that`A U = H_k * U`. We conclude that the elements
798
+ of `H` are the eigenvalues of this operator. Therefore
799
+
800
+ * This operator is positive definite if and only if `Real{H} > 0`.
801
+
802
+ A general property of Fourier transforms is the correspondence between
803
+ Hermitian functions and real valued transforms.
804
+
805
+ Suppose `H.shape = [B1,...,Bb, N]`. We say that `H` is a Hermitian spectrum
806
+ if, with `%` meaning modulus division,
807
+
808
+ ```H[..., n % N] = ComplexConjugate[ H[..., (-n) % N] ]```
809
+
810
+ * This operator corresponds to a real matrix if and only if `H` is Hermitian.
811
+ * This operator is self-adjoint if and only if `H` is real.
812
+
813
+ See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
814
+
815
+ #### Example of a self-adjoint positive definite operator
816
+
817
+ ```python
818
+ # spectrum is real ==> operator is self-adjoint
819
+ # spectrum is positive ==> operator is positive definite
820
+ spectrum = [6., 4, 2]
821
+
822
+ operator = LinearOperatorCirculant(spectrum)
823
+
824
+ # IFFT[spectrum]
825
+ operator.convolution_kernel()
826
+ ==> [4 + 0j, 1 + 0.58j, 1 - 0.58j]
827
+
828
+ operator.to_dense()
829
+ ==> [[4 + 0.0j, 1 - 0.6j, 1 + 0.6j],
830
+ [1 + 0.6j, 4 + 0.0j, 1 - 0.6j],
831
+ [1 - 0.6j, 1 + 0.6j, 4 + 0.0j]]
832
+ ```
833
+
834
+ #### Example of defining in terms of a real convolution kernel
835
+
836
+ ```python
837
+ # convolution_kernel is real ==> spectrum is Hermitian.
838
+ convolution_kernel = [1., 2., 1.]]
839
+ spectrum = tf.signal.fft(tf.cast(convolution_kernel, tf.complex64))
840
+
841
+ # spectrum is Hermitian ==> operator is real.
842
+ # spectrum is shape [3] ==> operator is shape [3, 3]
843
+ # We force the input/output type to be real, which allows this to operate
844
+ # like a real matrix.
845
+ operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32)
846
+
847
+ operator.to_dense()
848
+ ==> [[ 1, 1, 2],
849
+ [ 2, 1, 1],
850
+ [ 1, 2, 1]]
851
+ ```
852
+
853
+ #### Example of Hermitian spectrum
854
+
855
+ ```python
856
+ # spectrum is shape [3] ==> operator is shape [3, 3]
857
+ # spectrum is Hermitian ==> operator is real.
858
+ spectrum = [1, 1j, -1j]
859
+
860
+ operator = LinearOperatorCirculant(spectrum)
861
+
862
+ operator.to_dense()
863
+ ==> [[ 0.33 + 0j, 0.91 + 0j, -0.24 + 0j],
864
+ [-0.24 + 0j, 0.33 + 0j, 0.91 + 0j],
865
+ [ 0.91 + 0j, -0.24 + 0j, 0.33 + 0j]
866
+ ```
867
+
868
+ #### Example of forcing real `dtype` when spectrum is Hermitian
869
+
870
+ ```python
871
+ # spectrum is shape [4] ==> operator is shape [4, 4]
872
+ # spectrum is real ==> operator is self-adjoint
873
+ # spectrum is Hermitian ==> operator is real
874
+ # spectrum has positive real part ==> operator is positive-definite.
875
+ spectrum = [6., 4, 2, 4]
876
+
877
+ # Force the input dtype to be float32.
878
+ # Cast the output to float32. This is fine because the operator will be
879
+ # real due to Hermitian spectrum.
880
+ operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32)
881
+
882
+ operator.shape
883
+ ==> [4, 4]
884
+
885
+ operator.to_dense()
886
+ ==> [[4, 1, 0, 1],
887
+ [1, 4, 1, 0],
888
+ [0, 1, 4, 1],
889
+ [1, 0, 1, 4]]
890
+
891
+ # convolution_kernel = tf.signal.ifft(spectrum)
892
+ operator.convolution_kernel()
893
+ ==> [4, 1, 0, 1]
894
+ ```
895
+
896
+ #### Performance
897
+
898
+ Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
899
+ and `x.shape = [N, R]`. Then
900
+
901
+ * `operator.matmul(x)` is `O(R*N*Log[N])`
902
+ * `operator.solve(x)` is `O(R*N*Log[N])`
903
+ * `operator.determinant()` involves a size `N` `reduce_prod`.
904
+
905
+ If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
906
+ `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
907
+
908
+ #### Matrix property hints
909
+
910
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
911
+ for `X = non_singular, self_adjoint, positive_definite, square`.
912
+ These have the following meaning:
913
+
914
+ * If `is_X == True`, callers should expect the operator to have the
915
+ property `X`. This is a promise that should be fulfilled, but is *not* a
916
+ runtime assert. For example, finite floating point precision may result
917
+ in these promises being violated.
918
+ * If `is_X == False`, callers should expect the operator to not have `X`.
919
+ * If `is_X == None` (the default), callers should have no expectation either
920
+ way.
921
+
922
+ References:
923
+ Toeplitz and Circulant Matrices - A Review:
924
+ [Gray, 2006](https://www.nowpublishers.com/article/Details/CIT-006)
925
+ ([pdf](https://ee.stanford.edu/~gray/toeplitz.pdf))
926
+ """
927
+
928
+ def __init__(self,
929
+ spectrum: tensor.Tensor,
930
+ input_output_dtype=dtypes.complex64,
931
+ is_non_singular: bool = None,
932
+ is_self_adjoint: bool = None,
933
+ is_positive_definite: bool = None,
934
+ is_square: bool = True,
935
+ name="LinearOperatorCirculant"):
936
+ r"""Initialize an `LinearOperatorCirculant`.
937
+
938
+ This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
939
+ by providing `spectrum`, a `[B1,...,Bb, N]` `Tensor`.
940
+
941
+ If `input_output_dtype = DTYPE`:
942
+
943
+ * Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
944
+ * Values returned by all methods, such as `matmul` or `determinant` will be
945
+ cast to `DTYPE`.
946
+
947
+ Note that if the spectrum is not Hermitian, then this operator corresponds
948
+ to a complex matrix with non-zero imaginary part. In this case, setting
949
+ `input_output_dtype` to a real type will forcibly cast the output to be
950
+ real, resulting in incorrect results!
951
+
952
+ If on the other hand the spectrum is Hermitian, then this operator
953
+ corresponds to a real-valued matrix, and setting `input_output_dtype` to
954
+ a real type is fine.
955
+
956
+ Args:
957
+ spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`,
958
+ `float32`, `float64`, `complex64`, `complex128`. Type can be different
959
+ than `input_output_dtype`
960
+ input_output_dtype: `dtype` for input/output.
961
+ is_non_singular: Expect that this operator is non-singular.
962
+ is_self_adjoint: Expect that this operator is equal to its hermitian
963
+ transpose. If `spectrum` is real, this will always be true.
964
+ is_positive_definite: Expect that this operator is positive definite,
965
+ meaning the quadratic form `x^H A x` has positive real part for all
966
+ nonzero `x`. Note that we do not require the operator to be
967
+ self-adjoint to be positive-definite. See:
968
+ https://en.wikipedia.org/wiki/Positive-definite_matrix\
969
+ #Extension_for_non_symmetric_matrices
970
+ is_square: Expect that this operator acts like square [batch] matrices.
971
+ name: A name to prepend to all ops created by this class.
972
+ """
973
+ parameters = dict(
974
+ spectrum=spectrum,
975
+ input_output_dtype=input_output_dtype,
976
+ is_non_singular=is_non_singular,
977
+ is_self_adjoint=is_self_adjoint,
978
+ is_positive_definite=is_positive_definite,
979
+ is_square=is_square,
980
+ name=name
981
+ )
982
+ super(LinearOperatorCirculant, self).__init__(
983
+ spectrum,
984
+ block_depth=1,
985
+ input_output_dtype=input_output_dtype,
986
+ is_non_singular=is_non_singular,
987
+ is_self_adjoint=is_self_adjoint,
988
+ is_positive_definite=is_positive_definite,
989
+ is_square=is_square,
990
+ parameters=parameters,
991
+ name=name)
992
+
993
+ def _linop_adjoint(self) -> "LinearOperatorCirculant":
994
+ spectrum = self.spectrum
995
+ if spectrum.dtype.is_complex:
996
+ spectrum = math_ops.conj(spectrum)
997
+
998
+ # Conjugating the spectrum is sufficient to get the adjoint.
999
+ return LinearOperatorCirculant(
1000
+ spectrum=spectrum,
1001
+ is_non_singular=self.is_non_singular,
1002
+ is_self_adjoint=self.is_self_adjoint,
1003
+ is_positive_definite=self.is_positive_definite,
1004
+ is_square=True)
1005
+
1006
+ def _linop_inverse(self) -> "LinearOperatorCirculant":
1007
+ return LinearOperatorCirculant(
1008
+ spectrum=1. / self.spectrum,
1009
+ is_non_singular=self.is_non_singular,
1010
+ is_self_adjoint=self.is_self_adjoint,
1011
+ is_positive_definite=self.is_positive_definite,
1012
+ is_square=True,
1013
+ input_output_dtype=self.dtype)
1014
+
1015
+ def _linop_matmul(
1016
+ self,
1017
+ left_operator: "LinearOperatorCirculant",
1018
+ right_operator: linear_operator.LinearOperator,
1019
+ ) -> linear_operator.LinearOperator:
1020
+ if not isinstance(
1021
+ right_operator, LinearOperatorCirculant
1022
+ ) or not isinstance(left_operator, type(right_operator)):
1023
+ return super()._linop_matmul(left_operator, right_operator)
1024
+
1025
+ return LinearOperatorCirculant(
1026
+ spectrum=left_operator.spectrum * right_operator.spectrum,
1027
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1028
+ left_operator, right_operator
1029
+ ),
1030
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1031
+ left_operator, right_operator
1032
+ ),
1033
+ is_positive_definite=(
1034
+ property_hint_util.combined_commuting_positive_definite_hint(
1035
+ left_operator, right_operator
1036
+ )
1037
+ ),
1038
+ is_square=True,
1039
+ )
1040
+
1041
+ def _linop_solve(
1042
+ self,
1043
+ left_operator: "LinearOperatorCirculant",
1044
+ right_operator: linear_operator.LinearOperator,
1045
+ ) -> linear_operator.LinearOperator:
1046
+ if not isinstance(right_operator, LinearOperatorCirculant):
1047
+ return super()._linop_solve(left_operator, right_operator)
1048
+
1049
+ return LinearOperatorCirculant(
1050
+ spectrum=right_operator.spectrum / left_operator.spectrum,
1051
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1052
+ left_operator, right_operator),
1053
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1054
+ left_operator, right_operator),
1055
+ is_positive_definite=(
1056
+ property_hint_util.combined_commuting_positive_definite_hint(
1057
+ left_operator, right_operator)),
1058
+ is_square=True)
1059
+
1060
+
1061
+ @tf_export("linalg.LinearOperatorCirculant2D")
1062
+ @linear_operator.make_composite_tensor
1063
+ class LinearOperatorCirculant2D(_BaseLinearOperatorCirculant):
1064
+ """`LinearOperator` acting like a block circulant matrix.
1065
+
1066
+ This operator acts like a block circulant matrix `A` with
1067
+ shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
1068
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
1069
+ an `N x N` matrix. This matrix `A` is not materialized, but for
1070
+ purposes of broadcasting this shape will be relevant.
1071
+
1072
+ #### Description in terms of block circulant matrices
1073
+
1074
+ If `A` is block circulant, with block sizes `N0, N1` (`N0 * N1 = N`):
1075
+ `A` has a block circulant structure, composed of `N0 x N0` blocks, with each
1076
+ block an `N1 x N1` circulant matrix.
1077
+
1078
+ For example, with `W`, `X`, `Y`, `Z` each circulant,
1079
+
1080
+ ```
1081
+ A = |W Z Y X|
1082
+ |X W Z Y|
1083
+ |Y X W Z|
1084
+ |Z Y X W|
1085
+ ```
1086
+
1087
+ Note that `A` itself will not in general be circulant.
1088
+
1089
+ #### Description in terms of the frequency spectrum
1090
+
1091
+ There is an equivalent description in terms of the [batch] spectrum `H` and
1092
+ Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
1093
+ dimensions.
1094
+
1095
+ If `H.shape = [N0, N1]`, (`N0 * N1 = N`):
1096
+ Loosely speaking, matrix multiplication is equal to the action of a
1097
+ Fourier multiplier: `A u = IDFT2[ H DFT2[u] ]`.
1098
+ Precisely speaking, given `[N, R]` matrix `u`, let `DFT2[u]` be the
1099
+ `[N0, N1, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, R]` and taking
1100
+ a two dimensional DFT across the first two dimensions. Let `IDFT2` be the
1101
+ inverse of `DFT2`. Matrix multiplication may be expressed columnwise:
1102
+
1103
+ ```(A u)_r = IDFT2[ H * (DFT2[u])_r ]```
1104
+
1105
+ #### Operator properties deduced from the spectrum.
1106
+
1107
+ * This operator is positive definite if and only if `Real{H} > 0`.
1108
+
1109
+ A general property of Fourier transforms is the correspondence between
1110
+ Hermitian functions and real valued transforms.
1111
+
1112
+ Suppose `H.shape = [B1,...,Bb, N0, N1]`, we say that `H` is a Hermitian
1113
+ spectrum if, with `%` indicating modulus division,
1114
+
1115
+ ```
1116
+ H[..., n0 % N0, n1 % N1] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1 ].
1117
+ ```
1118
+
1119
+ * This operator corresponds to a real matrix if and only if `H` is Hermitian.
1120
+ * This operator is self-adjoint if and only if `H` is real.
1121
+
1122
+ See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
1123
+
1124
+ ### Example of a self-adjoint positive definite operator
1125
+
1126
+ ```python
1127
+ # spectrum is real ==> operator is self-adjoint
1128
+ # spectrum is positive ==> operator is positive definite
1129
+ spectrum = [[1., 2., 3.],
1130
+ [4., 5., 6.],
1131
+ [7., 8., 9.]]
1132
+
1133
+ operator = LinearOperatorCirculant2D(spectrum)
1134
+
1135
+ # IFFT[spectrum]
1136
+ operator.convolution_kernel()
1137
+ ==> [[5.0+0.0j, -0.5-.3j, -0.5+.3j],
1138
+ [-1.5-.9j, 0, 0],
1139
+ [-1.5+.9j, 0, 0]]
1140
+
1141
+ operator.to_dense()
1142
+ ==> Complex self adjoint 9 x 9 matrix.
1143
+ ```
1144
+
1145
+ #### Example of defining in terms of a real convolution kernel,
1146
+
1147
+ ```python
1148
+ # convolution_kernel is real ==> spectrum is Hermitian.
1149
+ convolution_kernel = [[1., 2., 1.], [5., -1., 1.]]
1150
+ spectrum = tf.signal.fft2d(tf.cast(convolution_kernel, tf.complex64))
1151
+
1152
+ # spectrum is shape [2, 3] ==> operator is shape [6, 6]
1153
+ # spectrum is Hermitian ==> operator is real.
1154
+ operator = LinearOperatorCirculant2D(spectrum, input_output_dtype=tf.float32)
1155
+ ```
1156
+
1157
+ #### Performance
1158
+
1159
+ Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
1160
+ and `x.shape = [N, R]`. Then
1161
+
1162
+ * `operator.matmul(x)` is `O(R*N*Log[N])`
1163
+ * `operator.solve(x)` is `O(R*N*Log[N])`
1164
+ * `operator.determinant()` involves a size `N` `reduce_prod`.
1165
+
1166
+ If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
1167
+ `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
1168
+
1169
+ #### Matrix property hints
1170
+
1171
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
1172
+ for `X = non_singular, self_adjoint, positive_definite, square`.
1173
+ These have the following meaning
1174
+ * If `is_X == True`, callers should expect the operator to have the
1175
+ property `X`. This is a promise that should be fulfilled, but is *not* a
1176
+ runtime assert. For example, finite floating point precision may result
1177
+ in these promises being violated.
1178
+ * If `is_X == False`, callers should expect the operator to not have `X`.
1179
+ * If `is_X == None` (the default), callers should have no expectation either
1180
+ way.
1181
+ """
1182
+
1183
+ def __init__(self,
1184
+ spectrum: tensor.Tensor,
1185
+ input_output_dtype=dtypes.complex64,
1186
+ is_non_singular: bool = None,
1187
+ is_self_adjoint: bool = None,
1188
+ is_positive_definite: bool = None,
1189
+ is_square: bool = True,
1190
+ name="LinearOperatorCirculant2D"):
1191
+ r"""Initialize an `LinearOperatorCirculant2D`.
1192
+
1193
+ This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
1194
+ by providing `spectrum`, a `[B1,...,Bb, N0, N1]` `Tensor` with `N0*N1 = N`.
1195
+
1196
+ If `input_output_dtype = DTYPE`:
1197
+
1198
+ * Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
1199
+ * Values returned by all methods, such as `matmul` or `determinant` will be
1200
+ cast to `DTYPE`.
1201
+
1202
+ Note that if the spectrum is not Hermitian, then this operator corresponds
1203
+ to a complex matrix with non-zero imaginary part. In this case, setting
1204
+ `input_output_dtype` to a real type will forcibly cast the output to be
1205
+ real, resulting in incorrect results!
1206
+
1207
+ If on the other hand the spectrum is Hermitian, then this operator
1208
+ corresponds to a real-valued matrix, and setting `input_output_dtype` to
1209
+ a real type is fine.
1210
+
1211
+ Args:
1212
+ spectrum: Shape `[B1,...,Bb, N0, N1]` `Tensor`. Allowed dtypes:
1213
+ `float16`, `float32`, `float64`, `complex64`, `complex128`.
1214
+ Type can be different than `input_output_dtype`
1215
+ input_output_dtype: `dtype` for input/output.
1216
+ is_non_singular: Expect that this operator is non-singular.
1217
+ is_self_adjoint: Expect that this operator is equal to its hermitian
1218
+ transpose. If `spectrum` is real, this will always be true.
1219
+ is_positive_definite: Expect that this operator is positive definite,
1220
+ meaning the quadratic form `x^H A x` has positive real part for all
1221
+ nonzero `x`. Note that we do not require the operator to be
1222
+ self-adjoint to be positive-definite. See:
1223
+ https://en.wikipedia.org/wiki/Positive-definite_matrix\
1224
+ #Extension_for_non_symmetric_matrices
1225
+ is_square: Expect that this operator acts like square [batch] matrices.
1226
+ name: A name to prepend to all ops created by this class.
1227
+ """
1228
+ parameters = dict(
1229
+ spectrum=spectrum,
1230
+ input_output_dtype=input_output_dtype,
1231
+ is_non_singular=is_non_singular,
1232
+ is_self_adjoint=is_self_adjoint,
1233
+ is_positive_definite=is_positive_definite,
1234
+ is_square=is_square,
1235
+ name=name
1236
+ )
1237
+ super(LinearOperatorCirculant2D, self).__init__(
1238
+ spectrum,
1239
+ block_depth=2,
1240
+ input_output_dtype=input_output_dtype,
1241
+ is_non_singular=is_non_singular,
1242
+ is_self_adjoint=is_self_adjoint,
1243
+ is_positive_definite=is_positive_definite,
1244
+ is_square=is_square,
1245
+ parameters=parameters,
1246
+ name=name)
1247
+
1248
+ def _linop_adjoint(self) -> "LinearOperatorCirculant2D":
1249
+ spectrum = self.spectrum
1250
+ if spectrum.dtype.is_complex:
1251
+ spectrum = math_ops.conj(spectrum)
1252
+
1253
+ # Conjugating the spectrum is sufficient to get the adjoint.
1254
+ return LinearOperatorCirculant2D(
1255
+ spectrum=spectrum,
1256
+ is_non_singular=self.is_non_singular,
1257
+ is_self_adjoint=self.is_self_adjoint,
1258
+ is_positive_definite=self.is_positive_definite,
1259
+ is_square=True)
1260
+
1261
+ def _linop_inverse(self) -> "LinearOperatorCirculant2D":
1262
+ return LinearOperatorCirculant2D(
1263
+ spectrum=1. / self.spectrum,
1264
+ is_non_singular=self.is_non_singular,
1265
+ is_self_adjoint=self.is_self_adjoint,
1266
+ is_positive_definite=self.is_positive_definite,
1267
+ is_square=True,
1268
+ input_output_dtype=self.dtype)
1269
+
1270
+ def _linop_matmul(
1271
+ self,
1272
+ left_operator: "LinearOperatorCirculant2D",
1273
+ right_operator: linear_operator.LinearOperator,
1274
+ ) -> linear_operator.LinearOperator:
1275
+ if not isinstance(
1276
+ right_operator, LinearOperatorCirculant2D
1277
+ ) or not isinstance(left_operator, type(right_operator)):
1278
+ return super()._linop_matmul(left_operator, right_operator)
1279
+
1280
+ return LinearOperatorCirculant2D(
1281
+ spectrum=left_operator.spectrum * right_operator.spectrum,
1282
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1283
+ left_operator, right_operator
1284
+ ),
1285
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1286
+ left_operator, right_operator
1287
+ ),
1288
+ is_positive_definite=(
1289
+ property_hint_util.combined_commuting_positive_definite_hint(
1290
+ left_operator, right_operator
1291
+ )
1292
+ ),
1293
+ is_square=True,
1294
+ )
1295
+
1296
+ def _linop_solve(
1297
+ self,
1298
+ left_operator: "LinearOperatorCirculant2D",
1299
+ right_operator: linear_operator.LinearOperator,
1300
+ ) -> linear_operator.LinearOperator:
1301
+ if not isinstance(right_operator, LinearOperatorCirculant2D):
1302
+ return super()._linop_solve(left_operator, right_operator)
1303
+
1304
+ return LinearOperatorCirculant2D(
1305
+ spectrum=right_operator.spectrum / left_operator.spectrum,
1306
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1307
+ left_operator, right_operator),
1308
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1309
+ left_operator, right_operator),
1310
+ is_positive_definite=(
1311
+ property_hint_util.combined_commuting_positive_definite_hint(
1312
+ left_operator, right_operator)),
1313
+ is_square=True)
1314
+
1315
+
1316
+ @tf_export("linalg.LinearOperatorCirculant3D")
1317
+ @linear_operator.make_composite_tensor
1318
+ class LinearOperatorCirculant3D(_BaseLinearOperatorCirculant):
1319
+ """`LinearOperator` acting like a nested block circulant matrix.
1320
+
1321
+ This operator acts like a block circulant matrix `A` with
1322
+ shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
1323
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
1324
+ an `N x N` matrix. This matrix `A` is not materialized, but for
1325
+ purposes of broadcasting this shape will be relevant.
1326
+
1327
+ #### Description in terms of block circulant matrices
1328
+
1329
+ If `A` is nested block circulant, with block sizes `N0, N1, N2`
1330
+ (`N0 * N1 * N2 = N`):
1331
+ `A` has a block structure, composed of `N0 x N0` blocks, with each
1332
+ block an `N1 x N1` block circulant matrix.
1333
+
1334
+ For example, with `W`, `X`, `Y`, `Z` each block circulant,
1335
+
1336
+ ```
1337
+ A = |W Z Y X|
1338
+ |X W Z Y|
1339
+ |Y X W Z|
1340
+ |Z Y X W|
1341
+ ```
1342
+
1343
+ Note that `A` itself will not in general be circulant.
1344
+
1345
+ #### Description in terms of the frequency spectrum
1346
+
1347
+ There is an equivalent description in terms of the [batch] spectrum `H` and
1348
+ Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
1349
+ dimensions.
1350
+
1351
+ If `H.shape = [N0, N1, N2]`, (`N0 * N1 * N2 = N`):
1352
+ Loosely speaking, matrix multiplication is equal to the action of a
1353
+ Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`.
1354
+ Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the
1355
+ `[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and
1356
+ taking a three dimensional DFT across the first three dimensions. Let `IDFT3`
1357
+ be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise:
1358
+
1359
+ ```(A u)_r = IDFT3[ H * (DFT3[u])_r ]```
1360
+
1361
+ #### Operator properties deduced from the spectrum.
1362
+
1363
+ * This operator is positive definite if and only if `Real{H} > 0`.
1364
+
1365
+ A general property of Fourier transforms is the correspondence between
1366
+ Hermitian functions and real valued transforms.
1367
+
1368
+ Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`, we say that `H` is a Hermitian
1369
+ spectrum if, with `%` meaning modulus division,
1370
+
1371
+ ```
1372
+ H[..., n0 % N0, n1 % N1, n2 % N2]
1373
+ = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ].
1374
+ ```
1375
+
1376
+ * This operator corresponds to a real matrix if and only if `H` is Hermitian.
1377
+ * This operator is self-adjoint if and only if `H` is real.
1378
+
1379
+ See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
1380
+
1381
+ ### Examples
1382
+
1383
+ See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples.
1384
+
1385
+ #### Performance
1386
+
1387
+ Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
1388
+ and `x.shape = [N, R]`. Then
1389
+
1390
+ * `operator.matmul(x)` is `O(R*N*Log[N])`
1391
+ * `operator.solve(x)` is `O(R*N*Log[N])`
1392
+ * `operator.determinant()` involves a size `N` `reduce_prod`.
1393
+
1394
+ If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
1395
+ `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
1396
+
1397
+ #### Matrix property hints
1398
+
1399
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
1400
+ for `X = non_singular, self_adjoint, positive_definite, square`.
1401
+ These have the following meaning
1402
+ * If `is_X == True`, callers should expect the operator to have the
1403
+ property `X`. This is a promise that should be fulfilled, but is *not* a
1404
+ runtime assert. For example, finite floating point precision may result
1405
+ in these promises being violated.
1406
+ * If `is_X == False`, callers should expect the operator to not have `X`.
1407
+ * If `is_X == None` (the default), callers should have no expectation either
1408
+ way.
1409
+ """
1410
+
1411
+ def __init__(self,
1412
+ spectrum: tensor.Tensor,
1413
+ input_output_dtype=dtypes.complex64,
1414
+ is_non_singular: bool = None,
1415
+ is_self_adjoint: bool = None,
1416
+ is_positive_definite: bool = None,
1417
+ is_square: bool = True,
1418
+ name="LinearOperatorCirculant3D"):
1419
+ """Initialize an `LinearOperatorCirculant`.
1420
+
1421
+ This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
1422
+ by providing `spectrum`, a `[B1,...,Bb, N0, N1, N2]` `Tensor`
1423
+ with `N0*N1*N2 = N`.
1424
+
1425
+ If `input_output_dtype = DTYPE`:
1426
+
1427
+ * Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
1428
+ * Values returned by all methods, such as `matmul` or `determinant` will be
1429
+ cast to `DTYPE`.
1430
+
1431
+ Note that if the spectrum is not Hermitian, then this operator corresponds
1432
+ to a complex matrix with non-zero imaginary part. In this case, setting
1433
+ `input_output_dtype` to a real type will forcibly cast the output to be
1434
+ real, resulting in incorrect results!
1435
+
1436
+ If on the other hand the spectrum is Hermitian, then this operator
1437
+ corresponds to a real-valued matrix, and setting `input_output_dtype` to
1438
+ a real type is fine.
1439
+
1440
+ Args:
1441
+ spectrum: Shape `[B1,...,Bb, N0, N1, N2]` `Tensor`. Allowed dtypes:
1442
+ `float16`, `float32`, `float64`, `complex64`, `complex128`.
1443
+ Type can be different than `input_output_dtype`
1444
+ input_output_dtype: `dtype` for input/output.
1445
+ is_non_singular: Expect that this operator is non-singular.
1446
+ is_self_adjoint: Expect that this operator is equal to its hermitian
1447
+ transpose. If `spectrum` is real, this will always be true.
1448
+ is_positive_definite: Expect that this operator is positive definite,
1449
+ meaning the real part of all eigenvalues is positive. We do not require
1450
+ the operator to be self-adjoint to be positive-definite. See:
1451
+ https://en.wikipedia.org/wiki/Positive-definite_matrix
1452
+ #Extension_for_non_symmetric_matrices
1453
+ is_square: Expect that this operator acts like square [batch] matrices.
1454
+ name: A name to prepend to all ops created by this class.
1455
+ """
1456
+ parameters = dict(
1457
+ spectrum=spectrum,
1458
+ input_output_dtype=input_output_dtype,
1459
+ is_non_singular=is_non_singular,
1460
+ is_self_adjoint=is_self_adjoint,
1461
+ is_positive_definite=is_positive_definite,
1462
+ is_square=is_square,
1463
+ name=name
1464
+ )
1465
+ super(LinearOperatorCirculant3D, self).__init__(
1466
+ spectrum,
1467
+ block_depth=3,
1468
+ input_output_dtype=input_output_dtype,
1469
+ is_non_singular=is_non_singular,
1470
+ is_self_adjoint=is_self_adjoint,
1471
+ is_positive_definite=is_positive_definite,
1472
+ is_square=is_square,
1473
+ parameters=parameters,
1474
+ name=name)
1475
+
1476
+ def _linop_adjoint(self) -> "LinearOperatorCirculant3D":
1477
+ spectrum = self.spectrum
1478
+ if spectrum.dtype.is_complex:
1479
+ spectrum = math_ops.conj(spectrum)
1480
+
1481
+ # Conjugating the spectrum is sufficient to get the adjoint.
1482
+ return LinearOperatorCirculant3D(
1483
+ spectrum=spectrum,
1484
+ is_non_singular=self.is_non_singular,
1485
+ is_self_adjoint=self.is_self_adjoint,
1486
+ is_positive_definite=self.is_positive_definite,
1487
+ is_square=True)
1488
+
1489
+ def _linop_inverse(self) -> "LinearOperatorCirculant3D":
1490
+ return LinearOperatorCirculant3D(
1491
+ spectrum=1. / self.spectrum,
1492
+ is_non_singular=self.is_non_singular,
1493
+ is_self_adjoint=self.is_self_adjoint,
1494
+ is_positive_definite=self.is_positive_definite,
1495
+ is_square=True,
1496
+ input_output_dtype=self.dtype)
1497
+
1498
+ def _linop_matmul(
1499
+ self,
1500
+ left_operator: "LinearOperatorCirculant3D",
1501
+ right_operator: linear_operator.LinearOperator,
1502
+ ) -> linear_operator.LinearOperator:
1503
+ if not isinstance(
1504
+ right_operator, LinearOperatorCirculant3D
1505
+ ) or not isinstance(left_operator, type(right_operator)):
1506
+ return super()._linop_matmul(left_operator, right_operator)
1507
+
1508
+ return LinearOperatorCirculant3D(
1509
+ spectrum=left_operator.spectrum * right_operator.spectrum,
1510
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1511
+ left_operator, right_operator
1512
+ ),
1513
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1514
+ left_operator, right_operator
1515
+ ),
1516
+ is_positive_definite=(
1517
+ property_hint_util.combined_commuting_positive_definite_hint(
1518
+ left_operator, right_operator
1519
+ )
1520
+ ),
1521
+ is_square=True,
1522
+ )
1523
+
1524
+ def _linop_solve(
1525
+ self,
1526
+ left_operator: "LinearOperatorCirculant3D",
1527
+ right_operator: linear_operator.LinearOperator,
1528
+ ) -> linear_operator.LinearOperator:
1529
+ if not isinstance(right_operator, LinearOperatorCirculant3D):
1530
+ return super()._linop_solve(left_operator, right_operator)
1531
+
1532
+ return LinearOperatorCirculant3D(
1533
+ spectrum=right_operator.spectrum / left_operator.spectrum,
1534
+ is_non_singular=property_hint_util.combined_non_singular_hint(
1535
+ left_operator, right_operator),
1536
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
1537
+ left_operator, right_operator),
1538
+ is_positive_definite=(
1539
+ property_hint_util.combined_commuting_positive_definite_hint(
1540
+ left_operator, right_operator)),
1541
+ is_square=True)
1542
+
1543
+
1544
+ def _to_complex(x):
1545
+ if x.dtype.is_complex:
1546
+ return x
1547
+ dtype = dtypes.complex64
1548
+
1549
+ if x.dtype == dtypes.float64:
1550
+ dtype = dtypes.complex128
1551
+ return math_ops.cast(x, dtype)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_composition.py ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Composes one or more `LinearOperators`."""
16
+
17
+ from tensorflow.python.framework import common_shapes
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_shape
21
+ from tensorflow.python.ops import array_ops
22
+ from tensorflow.python.ops import array_ops_stack
23
+ from tensorflow.python.ops import check_ops
24
+ from tensorflow.python.ops import control_flow_ops
25
+ from tensorflow.python.ops import linalg_ops
26
+ from tensorflow.python.ops import math_ops
27
+ from tensorflow.python.ops.linalg import linear_operator
28
+ from tensorflow.python.ops.linalg import linear_operator_lower_triangular
29
+ from tensorflow.python.ops.linalg import linear_operator_util
30
+ from tensorflow.python.util.tf_export import tf_export
31
+
32
+ __all__ = ["LinearOperatorComposition"]
33
+
34
+
35
@tf_export("linalg.LinearOperatorComposition")
@linear_operator.make_composite_tensor
class LinearOperatorComposition(linear_operator.LinearOperator):
  """Composes one or more `LinearOperators`.

  This operator composes one or more linear operators `[op1,...,opJ]`,
  building a new `LinearOperator` with action defined by:

  ```
  op_composed(x) := op1(op2(...(opJ(x)...))
  ```

  If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
  [batch] matrix formed with the multiplication `A1 A2...AJ`.

  If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
  `N_j = M_{j+1}`, in which case the composed operator has shape equal to
  `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
  mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
  batch shapes broadcast. Even if the composed shape is well defined, the
  composed operator's methods may fail due to lack of broadcasting ability in
  the defining operators' methods.

  ```python
  # Create a 2 x 2 linear operator composed of two 2 x 2 operators.
  operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
  operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
  operator = LinearOperatorComposition([operator_1, operator_2])

  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 5 linear operators.
  matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix)

  # Create a [2, 3] batch of 5 x 6 linear operators.
  matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
  operator_56 = LinearOperatorFullMatrix(matrix_56)

  # Compose to create a [2, 3] batch of 4 x 6 operators.
  operator_46 = LinearOperatorComposition([operator_45, operator_56])

  # Create a shape [2, 3, 6, 2] vector.
  x = tf.random.normal(shape=[2, 3, 6, 2])
  operator.matmul(x)
  ==> Shape [2, 3, 4, 2] Tensor
  ```

  #### Performance

  The performance of `LinearOperatorComposition` on any operation is equal to
  the sum of the individual operators' operations.


  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               operators,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None):
    r"""Initialize a `LinearOperatorComposition`.

    `LinearOperatorComposition` is initialized with a list of operators
    `[op_1,...,op_J]`. For the `matmul` method to be well defined, the
    composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have
    similar constraints.

    Args:
      operators: Iterable of `LinearOperator` objects, each with
        the same `dtype` and composable shape.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`. Default is the individual
        operators names joined with `_o_`.

    Raises:
      TypeError: If all operators do not have the same `dtype`.
      ValueError: If `operators` is empty.
    """
    parameters = dict(
        operators=operators,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)

    # Validate operators.
    check_ops.assert_proper_iterable(operators)
    operators = list(operators)
    if not operators:
      raise ValueError(
          "Expected a non-empty list of operators. Found: %s" % operators)
    self._operators = operators

    # Validate dtype: every factor must agree, since intermediate results are
    # fed directly from one operator to the next.
    dtype = operators[0].dtype
    for operator in operators:
      if operator.dtype != dtype:
        name_type = (str((o.name, o.dtype)) for o in operators)
        raise TypeError(
            "Expected all operators to have the same dtype. Found %s"
            % " ".join(name_type))

    # Auto-set and check hints.
    # det(A1 ... AJ) = det(A1) * ... * det(AJ), so a product of non-singular
    # factors is itself non-singular; a `False` hint is then contradictory.
    if all(operator.is_non_singular for operator in operators):
      if is_non_singular is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "The composition of non-singular operators is always non-singular.")
      is_non_singular = True

    if _composition_must_be_self_adjoint(operators):
      if is_self_adjoint is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "The composition was determined to be self-adjoint but user "
            "provided incorrect `False` hint.")
      is_self_adjoint = True

    # A @ A.H products are square (and SA); see is_aat_form.
    if linear_operator_util.is_aat_form(operators):
      if is_square is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "The composition was determined have the form "
            "A @ A.H, hence it must be square. The user "
            "provided an incorrect `False` hint.")
      is_square = True

    # A non-singular A @ A.H is positive-definite (x^H A A^H x = ||A^H x||^2).
    if linear_operator_util.is_aat_form(operators) and is_non_singular:
      if is_positive_definite is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "The composition was determined to be non-singular and have the "
            "form A @ A.H, hence it must be positive-definite. The user "
            "provided an incorrect `False` hint.")
      is_positive_definite = True

    # Initialization.

    if name is None:
      name = "_o_".join(operator.name for operator in operators)
    with ops.name_scope(name):
      super(LinearOperatorComposition, self).__init__(
          dtype=dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  @property
  def operators(self):
    # The list of factor operators, in multiplication order A1, A2, ..., AJ.
    return self._operators

  def _shape(self):
    # Get final matrix shape: walk the chain checking that each factor's
    # domain matches the next factor's range.
    domain_dimension = self.operators[0].domain_dimension
    for operator in self.operators[1:]:
      domain_dimension.assert_is_compatible_with(operator.range_dimension)
      domain_dimension = operator.domain_dimension

    matrix_shape = tensor_shape.TensorShape(
        [self.operators[0].range_dimension,
         self.operators[-1].domain_dimension])

    # Get broadcast batch shape.
    # broadcast_shape checks for compatibility.
    batch_shape = self.operators[0].batch_shape
    for operator in self.operators[1:]:
      batch_shape = common_shapes.broadcast_shape(
          batch_shape, operator.batch_shape)

    return batch_shape.concatenate(matrix_shape)

  def _shape_tensor(self):
    # Avoid messy broadcasting if possible.
    if self.shape.is_fully_defined():
      return ops.convert_to_tensor(
          self.shape.as_list(), dtype=dtypes.int32, name="shape")

    # Don't check the matrix dimensions. That would add unnecessary Asserts to
    # the graph. Things will fail at runtime naturally if shapes are
    # incompatible.
    matrix_shape = array_ops_stack.stack([
        self.operators[0].range_dimension_tensor(),
        self.operators[-1].domain_dimension_tensor()
    ])

    # Dummy Tensor of zeros. Will never be materialized.
    # Adding zeros of each batch shape lets broadcasting compute the combined
    # batch shape for us.
    zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
    for operator in self.operators[1:]:
      zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
    batch_shape = array_ops.shape(zeros)

    return array_ops.concat((batch_shape, matrix_shape), 0)

  def _linop_cholesky(self) -> linear_operator.LinearOperator:
    """Computes Cholesky(LinearOperatorComposition)."""
    # L @ L.H will be handled with special code below. Why is L @ L.H the most
    # important special case?
    # Note that Diag @ Diag.H and Diag @ TriL and TriL @ Diag are already
    # compressed to Diag or TriL by diag matmul
    # registration. Similarly for Identity and ScaledIdentity.
    # So these would not appear in a LinearOperatorComposition unless explicitly
    # constructed as such. So the most important thing to check is L @ L.H.
    def _is_llt_product(self):
      """Determines if linop = L @ L.H for L = LinearOperatorLowerTriangular."""
      if len(self.operators) != 2:
        return False
      if not linear_operator_util.is_aat_form(self.operators):
        return False
      return isinstance(
          self.operators[0],
          linear_operator_lower_triangular.LinearOperatorLowerTriangular)

    if not _is_llt_product(self):
      # Generic fallback: densify and run the standard Cholesky kernel.
      return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
          linalg_ops.cholesky(self.to_dense()),
          is_non_singular=True,
          is_self_adjoint=False,
          is_square=True)

    left_op = self.operators[0]

    # left_op.is_positive_definite ==> op already has positive diag,return it.
    if left_op.is_positive_definite:
      return left_op

    # Recall that the base class has already verified
    # linop.is_positive_definite, else linop.cholesky() would have raised.
    # So in particular, we know the diagonal has nonzero entries.
    # In the generic case, we make op have positive diag by dividing each row
    # by the sign of the diag. This is equivalent to setting A = L @ D where
    # D is diag(sign(1 / L.diag_part())). Then A is lower triangular with
    # positive diag and A @ A^H = L @ D @ D^H @ L^H = L @ L^H = linop.
    # This also works for complex L,
    # since sign(x + iy) = exp(i * angle(x + iy)).
    diag_sign = array_ops.expand_dims(
        math_ops.sign(left_op.diag_part()), axis=-2)
    return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
        tril=left_op.tril / diag_sign,
        is_non_singular=left_op.is_non_singular,
        # L.is_self_adjoint ==> L is diagonal ==> L @ D is diagonal ==> SA
        # L.is_self_adjoint is False ==> L not diagonal ==> L @ D not diag ...
        is_self_adjoint=left_op.is_self_adjoint,
        # L.is_positive_definite ==> L has positive diag ==> L = L @ D
        # ==> (L @ D).is_positive_definite.
        # L.is_positive_definite is False could result
        # in L @ D being PD or not.
        # Consider L = [[1, 0], [-2, 1]] and quadratic form with x = [1, 1].
        # Note we will already return left_op if left_op.is_positive_definite
        # above, but to be explicit write this below.
        is_positive_definite=True if left_op.is_positive_definite else None,
        is_square=True,
    )

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # If self.operators = [A, B], and not adjoint, then
    # matmul_order_list = [B, A].
    # As a result, we return A.matmul(B.matmul(x))
    if adjoint:
      # (A1...AJ)^H = AJ^H ... A1^H, so the per-factor adjoints apply in
      # left-to-right order.
      matmul_order_list = self.operators
    else:
      matmul_order_list = list(reversed(self.operators))

    result = matmul_order_list[0].matmul(
        x, adjoint=adjoint, adjoint_arg=adjoint_arg)
    for operator in matmul_order_list[1:]:
      result = operator.matmul(result, adjoint=adjoint)
    return result

  def _determinant(self):
    # det of a product is the product of dets.
    result = self.operators[0].determinant()
    for operator in self.operators[1:]:
      result *= operator.determinant()
    return result

  def _log_abs_determinant(self):
    # log|det| of a product is the sum of the factors' log|det|.
    result = self.operators[0].log_abs_determinant()
    for operator in self.operators[1:]:
      result += operator.log_abs_determinant()
    return result

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # TODO(langmore) Implement solve using solve_ls if some intermediate
    # operator maps to a high dimensional space.
    # In that case, an exact solve may still be possible.

    # If self.operators = [A, B], and not adjoint, then
    # solve_order_list = [A, B].
    # As a result, we return B.solve(A.solve(x))
    if adjoint:
      solve_order_list = list(reversed(self.operators))
    else:
      solve_order_list = self.operators

    solution = solve_order_list[0].solve(
        rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
    for operator in solve_order_list[1:]:
      solution = operator.solve(solution, adjoint=adjoint)
    return solution

  def _assert_non_singular(self):
    # A product of square non-singular factors is non-singular, so asserting
    # each factor suffices; otherwise fall back to the base-class check.
    if all(operator.is_square for operator in self.operators):
      asserts = [operator.assert_non_singular() for operator in self.operators]
      return control_flow_ops.group(asserts)
    return super(LinearOperatorComposition, self)._assert_non_singular()

  @property
  def _composite_tensor_fields(self):
    return ("operators",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    return {"operators": [0] * len(self.operators)}
383
+
384
+
385
def _composition_must_be_self_adjoint(operators):
  """Decides whether a composition of operators is provably self-adjoint (SA).

  Args:
    operators: List of LinearOperators.

  Returns:
    True if the composition must be SA.  False means "not proven": the
    composition may still be SA, we just could not establish it here.
  """
  # A single self-adjoint factor composes to itself.
  lone_self_adjoint = (
      len(operators) == 1 and bool(operators[0].is_self_adjoint))
  if lone_self_adjoint:
    return True

  # Products shaped like A @ A.H (possibly factored, e.g.
  # (A1 @ A2) @ (A2.H @ A1.H)) are always self-adjoint.
  # This check misses some SA cases, e.g. (A @ I) @ A.H is SA but not AAT form.
  return bool(linear_operator_util.is_aat_form(operators))
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_diag.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`LinearOperator` acting like a diagonal matrix."""
16
+
17
+ from tensorflow.python.framework import ops
18
+ from tensorflow.python.framework import tensor_conversion
19
+ from tensorflow.python.ops import array_ops
20
+ from tensorflow.python.ops import check_ops
21
+ from tensorflow.python.ops import math_ops
22
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
23
+ from tensorflow.python.ops.linalg import linear_operator
24
+ from tensorflow.python.ops.linalg import linear_operator_lower_triangular
25
+ from tensorflow.python.ops.linalg import linear_operator_util
26
+ from tensorflow.python.ops.linalg import property_hint_util
27
+ from tensorflow.python.util.tf_export import tf_export
28
+
29
+ __all__ = ["LinearOperatorDiag",]
30
+
31
+
32
@tf_export("linalg.LinearOperatorDiag")
@linear_operator.make_composite_tensor
class LinearOperatorDiag(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] square diagonal matrix.

  This operator acts like a [batch] diagonal matrix `A` with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix. This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.

  `LinearOperatorDiag` is initialized with a (batch) vector.

  ```python
  # Create a 2 x 2 diagonal linear operator.
  diag = [1., -1.]
  operator = LinearOperatorDiag(diag)

  operator.to_dense()
  ==> [[1., 0.]
       [0., -1.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  diag = tf.random.normal(shape=[2, 3, 4])
  operator = LinearOperatorDiag(diag)

  # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible
  # since the batch dimensions, [2, 1], are broadcast to
  # operator.batch_shape = [2, 3].
  y = tf.random.normal(shape=[2, 1, 4, 2])
  x = operator.solve(y)
  ==> operator.matmul(x) = y
  ```

  #### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N], with b >= 0
  x.shape = [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Performance

  Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,
  and `x.shape = [N, R]`. Then

  * `operator.matmul(x)` involves `N * R` multiplications.
  * `operator.solve(x)` involves `N` divisions and `N * R` multiplications.
  * `operator.determinant()` involves a size `N` `reduce_prod`.

  If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               diag,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorDiag"):
    r"""Initialize a `LinearOperatorDiag`.

    Args:
      diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
        The diagonal of the operator. Allowed dtypes: `float16`, `float32`,
        `float64`, `complex64`, `complex128`.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. If `diag.dtype` is real, this is auto-set to `True`.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      TypeError: If `diag.dtype` is not an allowed type.
      ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
    """
    parameters = dict(
        diag=diag,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name
    )

    with ops.name_scope(name, values=[diag]):
      self._diag = linear_operator_util.convert_nonref_to_tensor(
          diag, name="diag")
      self._check_diag(self._diag)

      # Check and auto-set hints.
      # A real diagonal matrix equals its own Hermitian transpose.
      if not self._diag.dtype.is_complex:
        if is_self_adjoint is False:
          raise ValueError("A real diagonal operator is always self adjoint.")
        else:
          is_self_adjoint = True

      if is_square is False:
        raise ValueError("Only square diagonal operators currently supported.")
      is_square = True

      super(LinearOperatorDiag, self).__init__(
          dtype=self._diag.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  def _check_diag(self, diag):
    """Static check of diag."""
    # Only a rank check is possible statically; dtype is validated by usage.
    if diag.shape.ndims is not None and diag.shape.ndims < 1:
      raise ValueError("Argument diag must have at least 1 dimension.  "
                       "Found: %s" % diag)

  def _shape(self):
    # If d_shape = [5, 3], we return [5, 3, 3].
    d_shape = self._diag.shape
    return d_shape.concatenate(d_shape[-1:])

  def _shape_tensor(self):
    # Dynamic analogue of _shape: append a copy of the last dim of diag.
    d_shape = array_ops.shape(self._diag)
    k = d_shape[-1]
    return array_ops.concat((d_shape, [k]), 0)

  @property
  def diag(self):
    # The [batch] vector holding the operator's diagonal.
    return self._diag

  def _linop_inverse(self) -> "LinearOperatorDiag":
    # Inverse of diag(d) is diag(1/d); the is_* hints are all preserved.
    return LinearOperatorDiag(
        1. / self.diag,
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=self.is_positive_definite,
        is_square=True)

  def _linop_matmul(
      self,
      left_operator: "LinearOperatorDiag",
      right_operator: linear_operator.LinearOperator,
  ) -> linear_operator.LinearOperator:
    # Combined hints are valid for all branches below: diagonal operators
    # commute with the structured right-hand sides handled here.
    is_non_singular = property_hint_util.combined_non_singular_hint(
        left_operator, right_operator)
    is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint(
        left_operator, right_operator)
    is_positive_definite = (
        property_hint_util.combined_commuting_positive_definite_hint(
            left_operator, right_operator))
    if isinstance(right_operator, LinearOperatorDiag):
      # diag(a) @ diag(b) = diag(a * b).
      return LinearOperatorDiag(
          diag=left_operator.diag * right_operator.diag,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=True,
      )
    # instance of linear_operator_identity.LinearOperatorScaledIdentity
    # (duck-typed via hasattr, presumably to avoid a circular import —
    # NOTE(review): confirm no other operator class exposes both attributes).
    elif hasattr(right_operator, "_ones_diag") and hasattr(
        right_operator, "multiplier"
    ):
      return LinearOperatorDiag(
          diag=left_operator.diag * right_operator.multiplier,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=True)
    elif isinstance(
        right_operator,
        linear_operator_lower_triangular.LinearOperatorLowerTriangular,
    ):
      # diag(a) @ T scales each row of the triangular matrix by a.
      return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
          tril=left_operator.diag[..., None] * right_operator.to_dense(),
          is_non_singular=is_non_singular,
          # This is safe to do since the Triangular matrix is only self-adjoint
          # when it is a diagonal matrix, and hence commutes.
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=None,
          is_square=True)
    else:
      return super()._linop_matmul(left_operator, right_operator)

  def _linop_solve(
      self,
      left_operator: "LinearOperatorDiag",
      right_operator: linear_operator.LinearOperator,
  ) -> linear_operator.LinearOperator:
    # Mirrors _linop_matmul, with division by the left diagonal instead of
    # multiplication: result = diag(a)^{-1} @ right.
    is_non_singular = property_hint_util.combined_non_singular_hint(
        left_operator, right_operator)
    is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint(
        left_operator, right_operator)
    is_positive_definite = (
        property_hint_util.combined_commuting_positive_definite_hint(
            left_operator, right_operator))
    if isinstance(right_operator, LinearOperatorDiag):
      return LinearOperatorDiag(
          diag=right_operator.diag / left_operator.diag,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=True)
    # instance of linear_operator_identity.LinearOperatorScaledIdentity
    # (duck-typed via hasattr — see the matching note in _linop_matmul).
    elif (hasattr(right_operator, "_ones_diag")
          and hasattr(right_operator, "multiplier")):
      return LinearOperatorDiag(
          diag=right_operator.multiplier / left_operator.diag,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=True)
    elif isinstance(
        right_operator,
        linear_operator_lower_triangular.LinearOperatorLowerTriangular):
      return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
          tril=right_operator.to_dense() / left_operator.diag[..., None],
          is_non_singular=is_non_singular,
          # This is safe to do since the Triangular matrix is only self-adjoint
          # when it is a diagonal matrix, and hence commutes.
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=None,
          is_square=True)
    else:
      return super()._linop_solve(left_operator, right_operator)

  def _assert_non_singular(self):
    # A diagonal matrix is singular iff some diagonal entry is zero.
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._diag,
        message="Singular operator:  Diagonal contained zero values.")

  def _assert_positive_definite(self):
    if self.dtype.is_complex:
      message = (
          "Diagonal operator had diagonal entries with non-positive real part, "
          "thus was not positive definite.")
    else:
      message = (
          "Real diagonal operator had non-positive diagonal entries, "
          "thus was not positive definite.")

    # PD for a diagonal operator reduces to Re(diag) > 0 entrywise.
    return check_ops.assert_positive(
        math_ops.real(self._diag),
        message=message)

  def _assert_self_adjoint(self):
    # Self-adjoint diagonal <=> diagonal is real.
    return linear_operator_util.assert_zero_imag_part(
        self._diag,
        message=(
            "This diagonal operator contained non-zero imaginary values.  "
            " Thus it was not self-adjoint."))

  def _linop_adjoint(self) -> "LinearOperatorDiag":
    # Adjoint of diag(d) is diag(conj(d)); real diagonals pass through.
    diag = self.diag
    if diag.dtype.is_complex:
      diag = math_ops.conj(diag)

    return LinearOperatorDiag(
        diag=diag,
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=self.is_positive_definite,
        is_square=True)

  def _linop_cholesky(self) -> "LinearOperatorDiag":
    # Only reached after the base class has verified is_positive_definite
    # (cholesky() raises otherwise), so sqrt of the diagonal is well defined
    # and is the Cholesky factor of diag(d).
    return LinearOperatorDiag(
        math_ops.sqrt(self.diag),
        is_non_singular=True,
        is_self_adjoint=True,
        is_positive_definite=True,
        is_square=True)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    diag_term = math_ops.conj(self._diag) if adjoint else self._diag
    x = linalg.adjoint(x) if adjoint_arg else x
    # Expanding to [..., N, 1] broadcasts the diagonal across x's columns,
    # scaling each row of x.
    diag_mat = array_ops.expand_dims(diag_term, -1)
    return diag_mat * x

  def _matvec(self, x, adjoint=False):
    diag_term = math_ops.conj(self._diag) if adjoint else self._diag
    return diag_term * x

  def _determinant(self):
    # det of a diagonal matrix is the product of its diagonal entries.
    return math_ops.reduce_prod(self._diag, axis=[-1])

  def _log_abs_determinant(self):
    log_det = math_ops.reduce_sum(
        math_ops.log(math_ops.abs(self._diag)), axis=[-1])
    # abs() yields a real value; cast back so the result dtype matches the
    # operator for complex operators.
    if self.dtype.is_complex:
      log_det = math_ops.cast(log_det, dtype=self.dtype)
    return log_det

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    diag_term = math_ops.conj(self._diag) if adjoint else self._diag
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    # Solving diag(d) x = rhs just divides each row of rhs by d.
    inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1)
    return rhs * inv_diag_mat

  def _to_dense(self):
    return array_ops.matrix_diag(self._diag)

  def _diag_part(self):
    return self.diag

  def _add_to_tensor(self, x):
    # Only the diagonal of x changes; off-diagonal entries pass through.
    x_diag = array_ops.matrix_diag_part(x)
    new_diag = self._diag + x_diag
    return array_ops.matrix_set_diag(x, new_diag)

  def _eigvals(self):
    # Eigenvalues of a diagonal matrix are exactly its diagonal entries.
    return tensor_conversion.convert_to_tensor_v2_with_dispatch(self.diag)

  def _cond(self):
    # Condition number = max |eigenvalue| / min |eigenvalue|.
    abs_diag = math_ops.abs(self.diag)
    return (math_ops.reduce_max(abs_diag, axis=-1) /
            math_ops.reduce_min(abs_diag, axis=-1))

  @property
  def _composite_tensor_fields(self):
    return ("diag",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    return {"diag": 1}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_full_matrix.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`LinearOperator` that wraps a [batch] matrix."""
16
+
17
+ from tensorflow.python.framework import dtypes
18
+ from tensorflow.python.framework import ops
19
+ from tensorflow.python.framework import tensor_conversion
20
+ from tensorflow.python.ops import array_ops
21
+ from tensorflow.python.ops import math_ops
22
+ from tensorflow.python.ops.linalg import linear_operator
23
+ from tensorflow.python.ops.linalg import linear_operator_util
24
+ from tensorflow.python.util.tf_export import tf_export
25
+
26
+ __all__ = ["LinearOperatorFullMatrix"]
27
+
28
+
29
@tf_export("linalg.LinearOperatorFullMatrix")
@linear_operator.make_composite_tensor
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
  """`LinearOperator` that wraps a [batch] matrix.

  Wraps a [batch] matrix `A` (a `Tensor`) with shape `[B1,...,Bb, M, N]`,
  `b >= 0`. The leading `b` indices select a batch member; each
  `A[i1,...,ib, :, :]` is an `M x N` matrix.

  ```python
  # Create a 2 x 2 linear operator.
  matrix = [[1., 2.], [3., 4.]]
  operator = LinearOperatorFullMatrix(matrix)

  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  matrix = tf.random.normal(shape=[2, 3, 4, 4])
  operator = LinearOperatorFullMatrix(matrix)
  ```

  #### Shape compatibility

  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [M, N], with b >= 0
  x.shape = [B1,...,Bb] + [N, R], with R >= 0.
  ```

  #### Performance

  Performance matches the equivalent standard `TensorFlow` matrix ops.
  If `dtype` is real and both `is_self_adjoint` and `is_positive_definite`
  are set, a Cholesky factorization is used for the determinant and solve.

  With `operator` of shape `[M, N]` and `x.shape = [N, R]`:

  * `operator.matmul(x)` is `O(M * N * R)`.
  * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
  * If `M=N`, `operator.determinant()` is `O(N^3)`.

  Batched shapes `[B1,...,Bb, M, N]` multiply each cost by `B1*...*Bb`.

  #### Matrix property hints

  The `is_X` flags (`X = non_singular, self_adjoint, positive_definite,
  square`) are promises, not runtime asserts: `True` means callers may
  assume property `X` (finite floating point precision may still violate
  it), `False` that it does not hold, and `None` (the default) that
  nothing is known either way.
  """

  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorFullMatrix"):
    r"""Initialize a `LinearOperatorFullMatrix`.

    Args:
      matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`. Allowed
        dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Self-adjointness is not required. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
    """
    parameters = dict(
        matrix=matrix,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)

    with ops.name_scope(name, values=[matrix]):
      # Keep variables un-converted so gradients flow to them; validate the
      # dtype/rank up front.
      self._matrix = linear_operator_util.convert_nonref_to_tensor(
          matrix, name="matrix")
      self._check_matrix(self._matrix)

      super(LinearOperatorFullMatrix, self).__init__(
          dtype=self._matrix.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  def _check_matrix(self, matrix):
    """Static (graph-construction-time) check of the `matrix` argument."""
    allowed_dtypes = [
        dtypes.float16,
        dtypes.float32,
        dtypes.float64,
        dtypes.complex64,
        dtypes.complex128,
    ]

    matrix = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        matrix, name="matrix")

    dtype = matrix.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(f"Argument `matrix` must have dtype in {allowed_dtypes}. "
                      f"Received: {dtype}.")

    ndims = matrix.shape.ndims
    if ndims is not None and ndims < 2:
      raise ValueError(f"Argument `matrix` must have at least 2 dimensions. "
                       f"Received: {matrix}.")

  @property
  def matrix(self):
    """The matrix defining this operator."""
    return self._matrix

  def _shape(self):
    return self._matrix.shape

  def _shape_tensor(self):
    return array_ops.shape(self._matrix)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # The adjoint flags map directly onto the dense matmul kernel's
    # arguments; no transposed copies are materialized here.
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # Delegate to the generic dense solve from the base class (which may
    # use a Cholesky factorization per the class docstring).
    return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)

  def _to_dense(self):
    # Already dense by construction.
    return self._matrix

  @property
  def _composite_tensor_fields(self):
    return ("matrix",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    # The trailing two dimensions of `matrix` are the matrix dimensions.
    return {"matrix": 2}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_householder.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`LinearOperator` acting like a Householder transformation."""
16
+
17
+ from tensorflow.python.framework import errors
18
+ from tensorflow.python.framework import ops
19
+ from tensorflow.python.framework import tensor_conversion
20
+ from tensorflow.python.ops import array_ops
21
+ from tensorflow.python.ops import control_flow_ops
22
+ from tensorflow.python.ops import math_ops
23
+ from tensorflow.python.ops import nn
24
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
25
+ from tensorflow.python.ops.linalg import linear_operator
26
+ from tensorflow.python.ops.linalg import linear_operator_util
27
+ from tensorflow.python.util.tf_export import tf_export
28
+
29
+ __all__ = ["LinearOperatorHouseholder",]
30
+
31
+
32
@tf_export("linalg.LinearOperatorHouseholder")
@linear_operator.make_composite_tensor
class LinearOperatorHouseholder(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] of Householder transformations.

  Acts like a [batch] of Householder reflections with shape
  `[B1,...,Bb, N, N]`, `b >= 0`. The leading `b` indices select a batch
  member. The matrix itself is never materialized; the shape is only
  relevant for broadcasting.

  The operator is defined by a (batch) vector `v`: it reflects points in
  `R^n` about the hyperplane through the origin orthogonal to `v`.

  ```python
  # Create a 2 x 2 householder transform.
  vec = [1 / np.sqrt(2), 1. / np.sqrt(2)]
  operator = LinearOperatorHouseholder(vec)

  operator.to_dense()
  ==> [[0., -1.]
       [-1., -0.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor
  ```

  #### Shape compatibility

  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N], with b >= 0
  x.shape = [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Matrix property hints

  The `is_X` flags (`X = non_singular, self_adjoint, positive_definite,
  square`) are promises, not runtime asserts: `True` means callers may
  assume property `X` (finite floating point precision may still violate
  it), `False` that it does not hold, and `None` (the default) that
  nothing is known either way.
  """

  def __init__(self,
               reflection_axis,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorHouseholder"):
    r"""Initialize a `LinearOperatorHouseholder`.

    Args:
      reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`,
        `N >= 0`; the vector defining the hyperplane to reflect about.
        Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
        `complex128`.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. Autoset to `True`.
      is_positive_definite: Expect that the quadratic form `x^H A x` has
        positive real part for all nonzero `x`. Autoset to `False`.
      is_square: Expect that this operator acts like square [batch]
        matrices. Autoset to `True`.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is
        not `False` or `is_square` is not `True`.
    """
    parameters = dict(
        reflection_axis=reflection_axis,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)

    with ops.name_scope(name, values=[reflection_axis]):
      self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
          reflection_axis, name="reflection_axis")
      self._check_reflection_axis(self._reflection_axis)

      # A Householder reflection is always self-adjoint, never positive
      # definite, and always square: reject contradictory hints, then pin
      # the values.
      if is_self_adjoint is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Householder operator is always self adjoint.")
      else:
        is_self_adjoint = True

      if is_positive_definite is True:  # pylint:disable=g-bool-id-comparison
        raise ValueError(
            "A Householder operator is always non-positive definite.")
      else:
        is_positive_definite = False

      if is_square is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Householder operator is always square.")
      is_square = True

      super(LinearOperatorHouseholder, self).__init__(
          dtype=self._reflection_axis.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  def _check_reflection_axis(self, reflection_axis):
    """Static check of reflection_axis."""
    ndims = reflection_axis.shape.ndims
    if ndims is not None and ndims < 1:
      raise ValueError(
          "Argument reflection_axis must have at least 1 dimension. "
          "Found: %s" % reflection_axis)

  def _shape(self):
    # Append the trailing dim again: axis shape [5, 3] -> operator [5, 3, 3].
    axis_shape = self._reflection_axis.shape
    return axis_shape.concatenate(axis_shape[-1:])

  def _shape_tensor(self):
    axis_shape = array_ops.shape(self._reflection_axis)
    return array_ops.concat((axis_shape, [axis_shape[-1]]), 0)

  def _assert_non_singular(self):
    # Reflections are always invertible; nothing to check at runtime.
    return control_flow_ops.no_op("assert_non_singular")

  def _assert_positive_definite(self):
    raise errors.InvalidArgumentError(
        node_def=None, op=None, message="Householder operators are always "
        "non-positive definite.")

  def _assert_self_adjoint(self):
    # Self-adjoint by construction; nothing to check at runtime.
    return control_flow_ops.no_op("assert_self_adjoint")

  def _linop_adjoint(self) -> "LinearOperatorHouseholder":
    # Self-adjoint, so the adjoint is the operator itself.
    return self

  def _linop_inverse(self) -> "LinearOperatorHouseholder":
    # A reflection is an involution (A @ A = I), so A^-1 = A.
    return self

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # Reflect `x` about the hyperplane through the origin orthogonal to `v`:
    # the component of `x` along `v` flips sign while the orthogonal
    # component is unchanged, giving x - 2 * v * dot(v, x) / dot(v, v).
    # Because this is a reflection it lies in O(n) (real) or U(n) (complex)
    # and is its own adjoint, so the `adjoint` flag needs no handling.
    reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    x = linalg.adjoint(x) if adjoint_arg else x
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    mat = normalized_axis[..., array_ops.newaxis]
    x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)
    return x - 2 * mat * x_dot_normalized_v

  def _trace(self):
    # Eigenvalues are (n - 1) copies of +1 and a single -1, so trace = n - 2.
    shape = self.shape_tensor()
    trace_value = math_ops.cast(
        self._domain_dimension_tensor(shape=shape) - 2, self.dtype)
    return trace_value * array_ops.ones(
        shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)

  def _determinant(self):
    # Product of the eigenvalues: (+1)^(n-1) * (-1) = -1.
    return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)  # pylint: disable=invalid-unary-operand-type

  def _log_abs_determinant(self):
    # Orthogonal matrix -> |det| = 1 -> log|det| = 0.
    return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # A reflection is its own inverse, so solving is just another matmul.
    return self._matmul(rhs, adjoint, adjoint_arg)

  def _to_dense(self):
    # Dense form is I - 2 * v v^H for the unit-normalized axis `v`:
    # build -2 * v v^H, then add 1 along the diagonal.
    reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    mat = normalized_axis[..., array_ops.newaxis]
    matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
    return array_ops.matrix_set_diag(
        matrix, 1. + array_ops.matrix_diag_part(matrix))

  def _diag_part(self):
    # diag(I - 2 v v^H) = 1 - 2 * v_i * conj(v_i).
    reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        self.reflection_axis)
    normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
    return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)

  def _eigvals(self):
    # A single -1 eigenvalue followed by (n - 1) copies of +1.
    result_shape = array_ops.shape(self.reflection_axis)
    n = result_shape[-1]
    ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
    neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
    return array_ops.concat(
        [-array_ops.ones(shape=neg_shape, dtype=self.dtype),  # pylint: disable=invalid-unary-operand-type
         array_ops.ones(shape=ones_shape, dtype=self.dtype)], axis=-1)

  def _cond(self):
    # All singular values are 1, so the condition number is exactly 1.
    return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)

  @property
  def reflection_axis(self):
    """The (batch) vector defining the reflection hyperplane."""
    return self._reflection_axis

  @property
  def _composite_tensor_fields(self):
    return ("reflection_axis",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    # One trailing dimension of the axis belongs to the matrix, not the batch.
    return {"reflection_axis": 1}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_identity.py ADDED
@@ -0,0 +1,929 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`LinearOperator` acting like the identity matrix."""
16
+
17
+ import numpy as np
18
+
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.framework import ops
21
+ from tensorflow.python.framework import tensor_conversion
22
+ from tensorflow.python.framework import tensor_shape
23
+ from tensorflow.python.framework import tensor_util
24
+ from tensorflow.python.ops import array_ops
25
+ from tensorflow.python.ops import array_ops_stack
26
+ from tensorflow.python.ops import check_ops
27
+ from tensorflow.python.ops import control_flow_ops
28
+ from tensorflow.python.ops import math_ops
29
+ from tensorflow.python.ops.linalg import linalg_impl as linalg
30
+ from tensorflow.python.ops.linalg import linear_operator
31
+ from tensorflow.python.ops.linalg import linear_operator_diag
32
+ from tensorflow.python.ops.linalg import linear_operator_util
33
+ from tensorflow.python.ops.linalg import property_hint_util
34
+ from tensorflow.python.util.tf_export import tf_export
35
+
36
+ __all__ = [
37
+ "LinearOperatorIdentity",
38
+ "LinearOperatorScaledIdentity",
39
+ ]
40
+
41
+
42
class BaseLinearOperatorIdentity(linear_operator.LinearOperator):
  """Base class for Identity operators."""

  def _check_num_rows_possibly_add_asserts(self):
    """Static check of init arg `num_rows`, possibly add asserts."""
    # Dynamic (graph-time) asserts are only wired in when requested.
    if self._assert_proper_shapes:
      self._num_rows = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._num_rows,
              0,
              message="Argument num_rows must be a 0-D Tensor."),
          check_ops.assert_non_negative(
              self._num_rows,
              message="Argument num_rows must be non-negative."),
      ], self._num_rows)

    # Static checks: dtype first, then value/rank when statically known.
    if not self._num_rows.dtype.is_integer:
      raise TypeError("Argument num_rows must be integer type. Found:"
                      " %s" % self._num_rows)

    num_rows_static = self._num_rows_static
    if num_rows_static is None:
      # Nothing more can be verified at construction time.
      return

    if num_rows_static.ndim != 0:
      raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                       " %s" % num_rows_static)

    if num_rows_static < 0:
      raise ValueError("Argument num_rows must be non-negative. Found:"
                       " %s" % num_rows_static)

  def _min_matrix_dim(self):
    """Minimum of domain/range dimension, if statically available, else None."""
    domain_dim = tensor_shape.dimension_value(self.domain_dimension)
    range_dim = tensor_shape.dimension_value(self.range_dimension)
    if domain_dim is None or range_dim is None:
      return None
    return min(domain_dim, range_dim)

  def _min_matrix_dim_tensor(self):
    """Minimum of domain/range dimension, as a tensor."""
    # The last two entries of the shape are the matrix dimensions.
    return math_ops.reduce_min(self.shape_tensor()[-2:])

  def _ones_diag(self):
    """Returns the diagonal of this operator as all ones."""
    if self.shape.is_fully_defined():
      # Fully static shape: build the diag shape statically too.
      diag_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
    else:
      diag_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [self._min_matrix_dim_tensor()]], axis=0)
    return array_ops.ones(shape=diag_shape, dtype=self.dtype)
99
+
100
+
101
+ @tf_export("linalg.LinearOperatorIdentity")
102
+ @linear_operator.make_composite_tensor
103
+ class LinearOperatorIdentity(BaseLinearOperatorIdentity):
104
+ """`LinearOperator` acting like a [batch] square identity matrix.
105
+
106
+ This operator acts like a [batch] identity matrix `A` with shape
107
+ `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
108
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
109
+ an `N x N` matrix. This matrix `A` is not materialized, but for
110
+ purposes of broadcasting this shape will be relevant.
111
+
112
+ `LinearOperatorIdentity` is initialized with `num_rows`, and optionally
113
+ `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this
114
+ operator efficiently passes through all arguments. If `batch_shape` is
115
+ provided, broadcasting may occur, which will require making copies.
116
+
117
+ ```python
118
+ # Create a 2 x 2 identity matrix.
119
+ operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)
120
+
121
+ operator.to_dense()
122
+ ==> [[1., 0.]
123
+ [0., 1.]]
124
+
125
+ operator.shape
126
+ ==> [2, 2]
127
+
128
+ operator.log_abs_determinant()
129
+ ==> 0.
130
+
131
+ x = ... Shape [2, 4] Tensor
132
+ operator.matmul(x)
133
+ ==> Shape [2, 4] Tensor, same as x.
134
+
135
+ y = tf.random.normal(shape=[3, 2, 4])
136
+ # Note that y.shape is compatible with operator.shape because operator.shape
137
+ # is broadcast to [3, 2, 2].
138
+ # This broadcast does NOT require copying data, since we can infer that y
139
+ # will be passed through without changing shape. We are always able to infer
140
+ # this if the operator has no batch_shape.
141
+ x = operator.solve(y)
142
+ ==> Shape [3, 2, 4] Tensor, same as y.
143
+
144
+ # Create a 2-batch of 2x2 identity matrices
145
+ operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2])
146
+ operator.to_dense()
147
+ ==> [[[1., 0.]
148
+ [0., 1.]],
149
+ [[1., 0.]
150
+ [0., 1.]]]
151
+
152
+ # Here, even though the operator has a batch shape, the input is the same as
153
+ # the output, so x can be passed through without a copy. The operator is able
154
+ # to detect that no broadcast is necessary because both x and the operator
155
+ # have statically defined shape.
156
+ x = ... Shape [2, 2, 3]
157
+ operator.matmul(x)
158
+ ==> Shape [2, 2, 3] Tensor, same as x
159
+
160
+ # Here the operator and x have different batch_shape, and are broadcast.
161
+ # This requires a copy, since the output is different size than the input.
162
+ x = ... Shape [1, 2, 3]
163
+ operator.matmul(x)
164
+ ==> Shape [2, 2, 3] Tensor, equal to [x, x]
165
+ ```
166
+
167
+ ### Shape compatibility
168
+
169
+ This operator acts on [batch] matrix with compatible shape.
170
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if
171
+
172
+ ```
173
+ operator.shape = [B1,...,Bb] + [N, N], with b >= 0
174
+ x.shape = [C1,...,Cc] + [N, R],
175
+ and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
176
+ ```
177
+
178
+ ### Performance
179
+
180
+ If `batch_shape` initialization arg is `None`:
181
+
182
+ * `operator.matmul(x)` is `O(1)`
183
+ * `operator.solve(x)` is `O(1)`
184
+ * `operator.determinant()` is `O(1)`
185
+
186
+ If `batch_shape` initialization arg is provided, and static checks cannot
187
+ rule out the need to broadcast:
188
+
189
+ * `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
190
+ * `operator.solve(x)` is `O(D1*...*Dd*N*R)`
191
+ * `operator.determinant()` is `O(B1*...*Bb)`
192
+
193
+ #### Matrix property hints
194
+
195
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
196
+ for `X = non_singular, self_adjoint, positive_definite, square`.
197
+ These have the following meaning:
198
+
199
+ * If `is_X == True`, callers should expect the operator to have the
200
+ property `X`. This is a promise that should be fulfilled, but is *not* a
201
+ runtime assert. For example, finite floating point precision may result
202
+ in these promises being violated.
203
+ * If `is_X == False`, callers should expect the operator to not have `X`.
204
+ * If `is_X == None` (the default), callers should have no expectation either
205
+ way.
206
+ """
207
+
208
def __init__(self,
             num_rows,
             batch_shape=None,
             dtype=None,
             is_non_singular=True,
             is_self_adjoint=True,
             is_positive_definite=True,
             is_square=True,
             assert_proper_shapes=False,
             name="LinearOperatorIdentity"):
  r"""Initialize a `LinearOperatorIdentity`.

  The `LinearOperatorIdentity` is initialized with arguments defining `dtype`
  and shape.

  This operator is able to broadcast the leading (batch) dimensions, which
  sometimes requires copying data. If `batch_shape` is `None`, the operator
  can take arguments of any batch shape without copying. See examples.

  Args:
    num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
      corresponding identity matrix.
    batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
      dimensions. If `None`, this operator has no leading dimensions.
    dtype: Data type of the matrix that this operator represents. Defaults
      to `float32` when `None`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    assert_proper_shapes: Python `bool`. If `False`, only perform static
      checks that initialization and method arguments have proper shape.
      If `True`, and static checks are inconclusive, add asserts to the graph.
    name: A name for this `LinearOperator`

  Raises:
    ValueError: If `num_rows` is determined statically to be non-scalar, or
      negative.
    ValueError: If `batch_shape` is determined statically to not be 1-D, or
      negative.
    ValueError: If any of the following is not `True`:
      `{is_self_adjoint, is_non_singular, is_positive_definite}`.
    TypeError: If `num_rows` or `batch_shape` is ref-type (e.g. Variable).
  """
  # Record constructor arguments so the operator can be rebuilt later
  # (used by the composite-tensor machinery and `__getitem__`).
  parameters = dict(
      num_rows=num_rows,
      batch_shape=batch_shape,
      dtype=dtype,
      is_non_singular=is_non_singular,
      is_self_adjoint=is_self_adjoint,
      is_positive_definite=is_positive_definite,
      is_square=is_square,
      assert_proper_shapes=assert_proper_shapes,
      name=name)

  # Default dtype is float32 when none is supplied.
  dtype = dtype or dtypes.float32
  self._assert_proper_shapes = assert_proper_shapes

  with ops.name_scope(name):
    dtype = dtypes.as_dtype(dtype)
    # The identity matrix has these four properties unconditionally, so any
    # attempt to disclaim one of them is rejected outright.
    if not is_self_adjoint:
      raise ValueError("An identity operator is always self adjoint.")
    if not is_non_singular:
      raise ValueError("An identity operator is always non-singular.")
    if not is_positive_definite:
      raise ValueError("An identity operator is always positive-definite.")
    if not is_square:
      raise ValueError("An identity operator is always square.")

    super(LinearOperatorIdentity, self).__init__(
        dtype=dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        parameters=parameters,
        name=name)

    # Shape arguments must not be variables (see `Raises` above); mutable
    # shapes would invalidate the static values captured just below.
    linear_operator_util.assert_not_ref_type(num_rows, "num_rows")
    linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape")

    # Keep both the dynamic Tensor and, when available, its static value so
    # shape queries can stay static whenever possible.
    self._num_rows = linear_operator_util.shape_tensor(
        num_rows, name="num_rows")
    self._num_rows_static = tensor_util.constant_value(self._num_rows)
    self._check_num_rows_possibly_add_asserts()

    if batch_shape is None:
      self._batch_shape_arg = None
    else:
      self._batch_shape_arg = linear_operator_util.shape_tensor(
          batch_shape, name="batch_shape_arg")
      self._batch_shape_static = tensor_util.constant_value(
          self._batch_shape_arg)
      self._check_batch_shape_possibly_add_asserts()
306
+
307
def _shape(self):
  """Static `TensorShape`: batch dims (if any) + `[N, N]`."""
  n = self._num_rows_static
  event_shape = tensor_shape.TensorShape((n, n))
  if self._batch_shape_arg is not None:
    # Prepend the (possibly partially known) static batch dimensions.
    return tensor_shape.TensorShape(self._batch_shape_static).concatenate(
        event_shape)
  return event_shape

def _shape_tensor(self):
  """Dynamic shape as a 1-D integer `Tensor`: batch dims + `[N, N]`."""
  event_shape = array_ops_stack.stack(
      (self._num_rows, self._num_rows), axis=0)
  if self._batch_shape_arg is None:
    return event_shape
  return array_ops.concat((self._batch_shape_arg, event_shape), 0)
323
+
324
def _linop_adjoint(self) -> "LinearOperatorIdentity":
  """Adjoint of the identity is itself (`I^H == I`)."""
  return self

def _linop_cholesky(self) -> "LinearOperatorIdentity":
  """Cholesky factor of the identity is the identity itself."""
  return LinearOperatorIdentity(
      num_rows=self._num_rows,
      batch_shape=self.batch_shape,
      dtype=self.dtype,
      is_non_singular=True,
      is_self_adjoint=True,
      is_positive_definite=True,
      is_square=True)

def _linop_inverse(self) -> "LinearOperatorIdentity":
  """Inverse of the identity is itself (`I^{-1} == I`)."""
  return self

def _linop_matmul(
    self,
    left_operator: "LinearOperatorIdentity",
    right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
  """`I @ B == B` for any conformable operator `B`."""
  # NOTE: the result is `right_operator` itself, which may be any operator,
  # so the return annotation is the general `LinearOperator` (previously it
  # incorrectly claimed `"LinearOperatorIdentity"`; this mirrors
  # `_linop_solve` below).
  del left_operator  # The identity leaves the other operand untouched.
  return right_operator

def _linop_solve(
    self,
    left_operator: "LinearOperatorIdentity",
    right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
  """Solve `I X = B` for `X`, which is just `B`."""
  del left_operator  # The identity does not alter the solution.
  return right_operator
355
+
356
def _assert_non_singular(self):
  # The identity is trivially non-singular; return a graph no-op so callers
  # still receive an assertion op to depend on.
  return control_flow_ops.no_op("assert_non_singular")

def _assert_positive_definite(self):
  # The identity is trivially positive-definite; no runtime check needed.
  return control_flow_ops.no_op("assert_positive_definite")

def _assert_self_adjoint(self):
  # The identity is trivially self-adjoint; no runtime check needed.
  return control_flow_ops.no_op("assert_self_adjoint")
364
+
365
def _possibly_broadcast_batch_shape(self, x):
  """Return 'x', possibly after broadcasting the leading dimensions.

  Args:
    x: `Tensor` whose trailing two dimensions are matrix dimensions.

  Returns:
    `x`, broadcast (when necessary) so its leading dimensions include this
    operator's batch shape. No-op when the operator was built without a
    `batch_shape` argument.
  """
  # If we have no batch shape, our batch shape broadcasts with everything!
  if self._batch_shape_arg is None:
    return x

  # Static attempt:
  # If we determine that no broadcast is necessary, pass x through
  # If we need a broadcast, add to an array of zeros.
  #
  # special_shape is the shape that, when broadcast with x's shape, will give
  # the correct broadcast_shape. Note that
  # We have already verified the second to last dimension of self.shape
  # matches x's shape in assert_compatible_matrix_dimensions.
  # Also, the final dimension of 'x' can have any shape.
  # Therefore, the final two dimensions of special_shape are 1's.
  special_shape = self.batch_shape.concatenate([1, 1])
  bshape = array_ops.broadcast_static_shape(x.shape, special_shape)
  if special_shape.is_fully_defined():
    # bshape.is_fully_defined iff special_shape.is_fully_defined.
    if bshape == x.shape:
      return x
    # Use the built in broadcasting of addition.
    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
    return x + zeros

  # Dynamic broadcast:
  # Always add to an array of zeros, rather than using a "cond", since a
  # cond would require copying data from GPU --> CPU.
  special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
  zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
  return x + zeros
397
+
398
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  """Identity matmul: returns `x`, possibly batch-broadcast."""
  # `adjoint` is irrelevant here since I^H == I.
  if adjoint_arg:
    x = linalg.adjoint(x)
  if self._assert_proper_shapes:
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
    x = control_flow_ops.with_dependencies([aps], x)
  return self._possibly_broadcast_batch_shape(x)

def _determinant(self):
  """`det(I) == 1` for every batch member."""
  return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)

def _log_abs_determinant(self):
  """`log|det(I)| == 0` for every batch member."""
  return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  """Solving `I x = rhs` is identical to multiplying by `I`."""
  # `adjoint` is ignored (I^H == I); shape checks and batch broadcasting
  # are delegated to `_matmul`.
  return self._matmul(rhs, adjoint_arg=adjoint_arg)
414
+
415
def _trace(self):
  """Trace of the [batch] identity: `N`, broadcast to the batch shape."""
  if self.batch_shape.is_fully_defined():
    ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
  else:
    ones = array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)

  dim = self._min_matrix_dim()
  if dim is None:
    # Matrix dimension only known at graph run time.
    dim = math_ops.cast(self._min_matrix_dim_tensor(), self.dtype)
  return dim * ones

def _diag_part(self):
  """Diagonal of the identity: all ones, shape [B1,...,Bb, N]."""
  return self._ones_diag()
431
+
432
def add_to_tensor(self, mat, name="add_to_tensor"):
  """Add matrix represented by this operator to `mat`. Equiv to `I + mat`.

  Args:
    mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
    name: A name to give this `Op`.

  Returns:
    A `Tensor` with broadcast shape and same `dtype` as `self`.
  """
  with self._name_scope(name):  # pylint: disable=not-callable
    mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        mat, name="mat"
    )
    # Adding the identity only shifts the diagonal of `mat` by one.
    shifted_diag = array_ops.matrix_diag_part(mat) + 1
    return array_ops.matrix_set_diag(mat, shifted_diag)
449
+
450
def _eigvals(self):
  # Every eigenvalue of the identity is 1; shape [B1,...,Bb, N].
  return self._ones_diag()

def _cond(self):
  # The condition number of the identity is exactly 1 per batch member.
  return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
455
+
456
def _check_num_rows_possibly_add_asserts(self):
  """Static check of init arg `num_rows`, possibly add asserts.

  Raises:
    TypeError: If `num_rows` is not integer typed.
    ValueError: If `num_rows` is statically known to be non-scalar or
      negative.
  """
  # Possibly add asserts.
  # Dynamic checks are only wired in when the caller asked for them, since
  # they add ops to the graph.
  if self._assert_proper_shapes:
    self._num_rows = control_flow_ops.with_dependencies([
        check_ops.assert_rank(
            self._num_rows,
            0,
            message="Argument num_rows must be a 0-D Tensor."),
        check_ops.assert_non_negative(
            self._num_rows,
            message="Argument num_rows must be non-negative."),
    ], self._num_rows)

  # Static checks.
  if not self._num_rows.dtype.is_integer:
    raise TypeError("Argument num_rows must be integer type. Found:"
                    " %s" % self._num_rows)

  num_rows_static = self._num_rows_static

  if num_rows_static is None:
    return  # Cannot do any other static checks.

  if num_rows_static.ndim != 0:
    raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                     " %s" % num_rows_static)

  if num_rows_static < 0:
    raise ValueError("Argument num_rows must be non-negative. Found:"
                     " %s" % num_rows_static)
487
+
488
def _check_batch_shape_possibly_add_asserts(self):
  """Static check of init arg `batch_shape`, possibly add asserts.

  Raises:
    TypeError: If `batch_shape` is not integer typed.
    ValueError: If `batch_shape` is statically known to be non-1-D or to
      contain a negative entry.
  """
  # No-op when the operator was constructed without a batch shape.
  if self._batch_shape_arg is None:
    return

  # Possibly add asserts
  if self._assert_proper_shapes:
    self._batch_shape_arg = control_flow_ops.with_dependencies([
        check_ops.assert_rank(
            self._batch_shape_arg,
            1,
            message="Argument batch_shape must be a 1-D Tensor."),
        check_ops.assert_non_negative(
            self._batch_shape_arg,
            message="Argument batch_shape must be non-negative."),
    ], self._batch_shape_arg)

  # Static checks
  if not self._batch_shape_arg.dtype.is_integer:
    raise TypeError("Argument batch_shape must be integer type. Found:"
                    " %s" % self._batch_shape_arg)

  if self._batch_shape_static is None:
    return  # Cannot do any other static checks.

  if self._batch_shape_static.ndim != 1:
    raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
                     " %s" % self._batch_shape_static)

  if np.any(self._batch_shape_static < 0):
    raise ValueError("Argument batch_shape must be non-negative. Found:"
                     "%s" % self._batch_shape_static)
520
+
521
@property
def _composite_tensor_prefer_static_fields(self):
  # Fields to capture as static Python values (rather than tensors) when
  # this operator is flattened into a composite tensor.
  return ("num_rows", "batch_shape")

@property
def _composite_tensor_fields(self):
  # Constructor kwargs that fully determine this operator for composite
  # tensor (de)serialization.
  return ("num_rows", "batch_shape", "dtype", "assert_proper_shapes")

def __getitem__(self, slices):
  # Slice the batch shape and return a new LinearOperatorIdentity.
  # Use a proxy shape and slice it. Use this as the new batch shape
  # NOTE(review): this assumes `self._batch_shape_arg` is not None;
  # slicing an operator constructed without `batch_shape` would fail in
  # `array_ops.ones` below -- confirm whether callers guard against that.
  new_batch_shape = array_ops.shape(
      array_ops.ones(self._batch_shape_arg)[slices])
  parameters = dict(self.parameters, batch_shape=new_batch_shape)
  return LinearOperatorIdentity(**parameters)
536
+
537
+
538
+ @tf_export("linalg.LinearOperatorScaledIdentity")
539
+ @linear_operator.make_composite_tensor
540
+ class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity):
541
+ """`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`.
542
+
543
+ This operator acts like a scaled [batch] identity matrix `A` with shape
544
+ `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
545
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
546
+ a scaled version of the `N x N` identity matrix.
547
+
548
+ `LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier`
549
+ (a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the
550
+ `multiplier` determines the scale for each batch member.
551
+
552
+ ```python
553
+ # Create a 2 x 2 scaled identity matrix.
554
+ operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
555
+
556
+ operator.to_dense()
557
+ ==> [[3., 0.]
558
+ [0., 3.]]
559
+
560
+ operator.shape
561
+ ==> [2, 2]
562
+
563
+ operator.log_abs_determinant()
564
+ ==> 2 * Log[3]
565
+
566
+ x = ... Shape [2, 4] Tensor
567
+ operator.matmul(x)
568
+ ==> 3 * x
569
+
570
+ y = tf.random.normal(shape=[3, 2, 4])
571
+ # Note that y.shape is compatible with operator.shape because operator.shape
572
+ # is broadcast to [3, 2, 2].
573
+ x = operator.solve(y)
574
+ ==> y / 3.
575
+
576
+ # Create a 2-batch of 2 x 2 scaled identity matrices
577
+ operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=[5., 5.])
578
+ operator.to_dense()
579
+ ==> [[[5., 0.]
580
+ [0., 5.]],
581
+ [[5., 0.]
582
+ [0., 5.]]]
583
+
584
+ x = ... Shape [2, 2, 3]
585
+ operator.matmul(x)
586
+ ==> 5 * x
587
+
588
+ # Here the operator and x have different batch_shape, and are broadcast.
589
+ x = ... Shape [1, 2, 3]
590
+ operator.matmul(x)
591
+ ==> 5 * x
592
+ ```
593
+
594
+ ### Shape compatibility
595
+
596
+ This operator acts on [batch] matrix with compatible shape.
597
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if
598
+
599
+ ```
600
+ operator.shape = [B1,...,Bb] + [N, N], with b >= 0
601
+ x.shape = [C1,...,Cc] + [N, R],
602
+ and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
603
+ ```
604
+
605
+ ### Performance
606
+
607
+ * `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
608
+ * `operator.solve(x)` is `O(D1*...*Dd*N*R)`
609
+ * `operator.determinant()` is `O(D1*...*Dd)`
610
+
611
+ #### Matrix property hints
612
+
613
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
614
+ for `X = non_singular, self_adjoint, positive_definite, square`.
615
+ These have the following meaning:
616
+ * If `is_X == True`, callers should expect the operator to have the
617
+ property `X`. This is a promise that should be fulfilled, but is *not* a
618
+ runtime assert. For example, finite floating point precision may result
619
+ in these promises being violated.
620
+ * If `is_X == False`, callers should expect the operator to not have `X`.
621
+ * If `is_X == None` (the default), callers should have no expectation either
622
+ way.
623
+ """
624
+
625
def __init__(self,
             num_rows,
             multiplier,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             is_square=True,
             assert_proper_shapes=False,
             name="LinearOperatorScaledIdentity"):
  r"""Initialize a `LinearOperatorScaledIdentity`.

  The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which
  determines the size of each identity matrix, and a `multiplier`,
  which defines `dtype`, batch shape, and scale of each matrix.

  This operator is able to broadcast the leading (batch) dimensions.

  Args:
    num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
      corresponding identity matrix.
    multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    assert_proper_shapes: Python `bool`. If `False`, only perform static
      checks that initialization and method arguments have proper shape.
      If `True`, and static checks are inconclusive, add asserts to the graph.
    name: A name for this `LinearOperator`

  Raises:
    ValueError: If `num_rows` is determined statically to be non-scalar, or
      negative.
  """
  # Record constructor args so the operator can be rebuilt (composite
  # tensor machinery).
  parameters = dict(
      num_rows=num_rows,
      multiplier=multiplier,
      is_non_singular=is_non_singular,
      is_self_adjoint=is_self_adjoint,
      is_positive_definite=is_positive_definite,
      is_square=is_square,
      assert_proper_shapes=assert_proper_shapes,
      name=name)

  self._assert_proper_shapes = assert_proper_shapes

  with ops.name_scope(name, values=[multiplier, num_rows]):
    # `convert_nonref_to_tensor` leaves variable-like inputs as-is, so a
    # variable multiplier stays live (updates are reflected).
    self._multiplier = linear_operator_util.convert_nonref_to_tensor(
        multiplier, name="multiplier")

    # Check and auto-set hints.
    # A real multiplier makes `cI` trivially self-adjoint: `False` is
    # rejected, `None` is upgraded to `True`.
    if not self._multiplier.dtype.is_complex:
      if is_self_adjoint is False:  # pylint: disable=g-bool-id-comparison
        raise ValueError("A real diagonal operator is always self adjoint.")
      else:
        is_self_adjoint = True

    if not is_square:
      raise ValueError("A ScaledIdentity operator is always square.")

    # `num_rows` must not be a variable: its static value is captured below.
    linear_operator_util.assert_not_ref_type(num_rows, "num_rows")

    super(LinearOperatorScaledIdentity, self).__init__(
        dtype=self._multiplier.dtype.base_dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        parameters=parameters,
        name=name)

    # Dynamic and (when available) static row counts, plus pre-cast copies
    # used by the determinant / log-determinant computations.
    self._num_rows = linear_operator_util.shape_tensor(
        num_rows, name="num_rows")
    self._num_rows_static = tensor_util.constant_value(self._num_rows)
    self._check_num_rows_possibly_add_asserts()
    self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)
    self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows,
                                                      self.dtype.real_dtype)
708
+
709
def _shape(self):
  """Static shape: `multiplier` batch shape + `[N, N]`."""
  n = self._num_rows_static
  event_shape = tensor_shape.TensorShape((n, n))
  return self.multiplier.shape.concatenate(event_shape)

def _shape_tensor(self):
  """Dynamic shape as a 1-D integer `Tensor`."""
  event_shape = array_ops_stack.stack(
      (self._num_rows, self._num_rows), axis=0)
  return array_ops.concat(
      (array_ops.shape(self.multiplier), event_shape), 0)
722
+
723
def _assert_non_singular(self):
  # `cI` is singular iff c == 0, so it suffices to check |c| > 0.
  return check_ops.assert_positive(
      math_ops.abs(self.multiplier), message="LinearOperator was singular")

def _assert_positive_definite(self):
  # `cI` is positive-definite iff Re(c) > 0.
  return check_ops.assert_positive(
      math_ops.real(self.multiplier),
      message="LinearOperator was not positive definite.")

def _assert_self_adjoint(self):
  # `cI` is self-adjoint iff Im(c) == 0.
  imag_multiplier = math_ops.imag(self.multiplier)
  return check_ops.assert_equal(
      array_ops.zeros_like(imag_multiplier),
      imag_multiplier,
      message="LinearOperator was not self-adjoint")
738
+
739
def _make_multiplier_matrix(self, conjugate=False):
  """Reshape `multiplier` to `[B1,...,Bb, 1, 1]` for matrix broadcasting."""
  matrix = self.multiplier[..., array_ops.newaxis, array_ops.newaxis]
  return math_ops.conj(matrix) if conjugate else matrix
746
+
747
def _linop_adjoint(self) -> "LinearOperatorScaledIdentity":
  # (cI)^H == conj(c) I; conjugation only matters for complex multipliers.
  multiplier = self.multiplier
  if multiplier.dtype.is_complex:
    multiplier = math_ops.conj(multiplier)

  return LinearOperatorScaledIdentity(
      num_rows=self._num_rows,
      multiplier=multiplier,
      is_non_singular=self.is_non_singular,
      is_self_adjoint=self.is_self_adjoint,
      is_positive_definite=self.is_positive_definite,
      is_square=True)

def _linop_cholesky(self) -> "LinearOperatorScaledIdentity":
  # chol(cI) == sqrt(c) I.
  # NOTE(review): no runtime check that the multiplier is positive;
  # presumably callers only invoke cholesky on operators hinted
  # positive-definite -- a negative real multiplier would yield NaNs.
  return LinearOperatorScaledIdentity(
      num_rows=self._num_rows,
      multiplier=math_ops.sqrt(self.multiplier),
      is_non_singular=True,
      is_self_adjoint=True,
      is_positive_definite=True,
      is_square=True)

def _linop_inverse(self) -> "LinearOperatorScaledIdentity":
  # (cI)^{-1} == (1/c) I.
  return LinearOperatorScaledIdentity(
      num_rows=self._num_rows,
      multiplier=1. / self.multiplier,
      is_non_singular=self.is_non_singular,
      is_self_adjoint=True,
      is_positive_definite=self.is_positive_definite,
      is_square=True)
777
+
778
def _linop_matmul(
    self,
    left_operator: "LinearOperatorScaledIdentity",
    right_operator: linear_operator.LinearOperator,
) -> "LinearOperatorScaledIdentity":
  """Specialized matmul when the left operand is a scaled identity."""
  # Property hints for the product are combined once and reused by every
  # specialized branch below.
  hints = dict(
      is_non_singular=property_hint_util.combined_non_singular_hint(
          left_operator, right_operator),
      is_self_adjoint=(
          property_hint_util.combined_commuting_self_adjoint_hint(
              left_operator, right_operator)),
      is_positive_definite=(
          property_hint_util.combined_commuting_positive_definite_hint(
              left_operator, right_operator)),
      is_square=True)

  if isinstance(right_operator, LinearOperatorScaledIdentity):
    # (c1 I) @ (c2 I) == (c1 * c2) I
    return LinearOperatorScaledIdentity(
        num_rows=left_operator.domain_dimension_tensor(),
        multiplier=left_operator.multiplier * right_operator.multiplier,
        **hints)
  if isinstance(right_operator, linear_operator_diag.LinearOperatorDiag):
    # (c I) @ diag(d) == diag(c * d)
    return linear_operator_diag.LinearOperatorDiag(
        diag=right_operator.diag * left_operator.multiplier,
        **hints)
  # No specialization available; defer to the generic implementation.
  return super()._linop_matmul(left_operator, right_operator)
807
+
808
def _linop_solve(
    self,
    left_operator: "LinearOperatorScaledIdentity",
    right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
  """Specialized solve when the left operand is a scaled identity."""
  hints = dict(
      is_non_singular=property_hint_util.combined_non_singular_hint(
          left_operator, right_operator),
      is_self_adjoint=(
          property_hint_util.combined_commuting_self_adjoint_hint(
              left_operator, right_operator)),
      is_positive_definite=(
          property_hint_util.combined_commuting_positive_definite_hint(
              left_operator, right_operator)),
      is_square=True)

  if isinstance(right_operator, LinearOperatorScaledIdentity):
    # (c1 I)^{-1} (c2 I) == (c2 / c1) I
    return LinearOperatorScaledIdentity(
        num_rows=left_operator.domain_dimension_tensor(),
        multiplier=right_operator.multiplier / left_operator.multiplier,
        **hints)
  if isinstance(right_operator, linear_operator_diag.LinearOperatorDiag):
    # (c I)^{-1} diag(d) == diag(d / c)
    return linear_operator_diag.LinearOperatorDiag(
        diag=right_operator.diag / left_operator.multiplier,
        **hints)
  # No specialization available; defer to the generic implementation.
  return super()._linop_solve(left_operator, right_operator)
837
+
838
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  """`(cI) x == c * x`, conjugating `c` when `adjoint=True`."""
  if adjoint_arg:
    x = linalg.adjoint(x)
  if self._assert_proper_shapes:
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
    x = control_flow_ops.with_dependencies([aps], x)
  # (cI)^H == conj(c) I, hence the conjugate for adjoint.
  return x * self._make_multiplier_matrix(conjugate=adjoint)

def _determinant(self):
  """`det(cI) == c**N`."""
  return self.multiplier**self._num_rows_cast_to_dtype

def _log_abs_determinant(self):
  """`log|det(cI)| == N * log|c|`."""
  return self._num_rows_cast_to_real_dtype * math_ops.log(
      math_ops.abs(self.multiplier))

def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  """Solve `(cI) x = rhs`: divide by the (possibly conjugated) scale."""
  if adjoint_arg:
    rhs = linalg.adjoint(rhs)
  if self._assert_proper_shapes:
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)
    rhs = control_flow_ops.with_dependencies([aps], rhs)
  return rhs / self._make_multiplier_matrix(conjugate=adjoint)
858
+
859
def _trace(self):
  """Trace of `cI`: `c * N`, broadcast over the batch shape."""
  if self.batch_shape.is_fully_defined():
    ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
  else:
    ones = array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)

  dim = self._min_matrix_dim()
  if dim is None:
    # Matrix dimension only known at graph run time.
    dim = math_ops.cast(self._min_matrix_dim_tensor(), self.dtype)
  return self.multiplier * dim * ones

def _diag_part(self):
  """Diagonal of `cI`: the multiplier broadcast along a length-N axis."""
  return self._ones_diag() * self.multiplier[..., array_ops.newaxis]
875
+
876
def add_to_tensor(self, mat, name="add_to_tensor"):
  """Add matrix represented by this operator to `mat`. Equiv to `cI + mat`.

  Args:
    mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
    name: A name to give this `Op`.

  Returns:
    A `Tensor` with broadcast shape and same `dtype` as `self`.
  """
  with self._name_scope(name):  # pylint: disable=not-callable
    # Shape [C1,...,Cc, M, M]
    mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(
        mat, name="mat"
    )
    # Only the diagonal changes: shift it by the multiplier, letting the
    # [B1,...,Bb, 1] multiplier broadcast against the [C1,...,Cc, M] diag.
    multiplier_vector = array_ops.expand_dims(self.multiplier, -1)
    shifted_diag = multiplier_vector + array_ops.matrix_diag_part(mat)
    return array_ops.matrix_set_diag(mat, shifted_diag)
902
+
903
def _eigvals(self):
  # Every eigenvalue of cI equals c; shape [B1,...,Bb, N].
  return self._ones_diag() * self.multiplier[..., array_ops.newaxis]

def _cond(self):
  # Condition number for a scalar time identity matrix is one, except when the
  # scalar is zero.
  return array_ops.where_v2(
      math_ops.equal(self._multiplier, 0.),
      math_ops.cast(np.nan, dtype=self.dtype),
      math_ops.cast(1., dtype=self.dtype))
913
+
914
@property
def multiplier(self):
  """The [batch] scalar `Tensor`, `c` in `cI`."""
  return self._multiplier

@property
def _composite_tensor_prefer_static_fields(self):
  # `num_rows` should be captured as a static Python value when this
  # operator is flattened into a composite tensor.
  return ("num_rows",)

@property
def _composite_tensor_fields(self):
  # Constructor kwargs that fully determine this operator for composite
  # tensor (de)serialization.
  return ("num_rows", "multiplier", "assert_proper_shapes")

@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
  # `multiplier` is a [batch] scalar: it contributes 0 dims per matrix dim.
  return {"multiplier": 0}
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_inversion.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Inverts a non-singular `LinearOperator`."""
16
+
17
+ from tensorflow.python.framework import ops
18
+ from tensorflow.python.ops.linalg import linear_operator
19
+ from tensorflow.python.ops.linalg import linear_operator_util
20
+ from tensorflow.python.util.tf_export import tf_export
21
+
22
+ __all__ = ["LinearOperatorInversion"]
23
+
24
+
25
+ @tf_export("linalg.LinearOperatorInversion")
26
+ @linear_operator.make_composite_tensor
27
+ class LinearOperatorInversion(linear_operator.LinearOperator):
28
+ """`LinearOperator` representing the inverse of another operator.
29
+
30
+ This operator represents the inverse of another operator.
31
+
32
+ ```python
33
+ # Create a 2 x 2 linear operator.
34
+ operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]])
35
+ operator_inv = LinearOperatorInversion(operator)
36
+
37
+ operator_inv.to_dense()
38
+ ==> [[1., 0.]
39
+ [0., 0.5]]
40
+
41
+ operator_inv.shape
42
+ ==> [2, 2]
43
+
44
+ operator_inv.log_abs_determinant()
45
+ ==> - log(2)
46
+
47
+ x = ... Shape [2, 4] Tensor
48
+ operator_inv.matmul(x)
49
+ ==> Shape [2, 4] Tensor, equal to operator.solve(x)
50
+ ```
51
+
52
+ #### Performance
53
+
54
+ The performance of `LinearOperatorInversion` depends on the underlying
55
+ operators performance: `solve` and `matmul` are swapped, and determinant is
56
+ inverted.
57
+
58
+ #### Matrix property hints
59
+
60
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
61
+ for `X = non_singular, self_adjoint, positive_definite, square`.
62
+ These have the following meaning:
63
+
64
+ * If `is_X == True`, callers should expect the operator to have the
65
+ property `X`. This is a promise that should be fulfilled, but is *not* a
66
+ runtime assert. For example, finite floating point precision may result
67
+ in these promises being violated.
68
+ * If `is_X == False`, callers should expect the operator to not have `X`.
69
+ * If `is_X == None` (the default), callers should have no expectation either
70
+ way.
71
+ """
72
+
73
+ def __init__(self,
74
+ operator,
75
+ is_non_singular=None,
76
+ is_self_adjoint=None,
77
+ is_positive_definite=None,
78
+ is_square=None,
79
+ name=None):
80
+ r"""Initialize a `LinearOperatorInversion`.
81
+
82
+ `LinearOperatorInversion` is initialized with an operator `A`. The `solve`
83
+ and `matmul` methods are effectively swapped. E.g.
84
+
85
+ ```
86
+ A = MyLinearOperator(...)
87
+ B = LinearOperatorInversion(A)
88
+ x = [....] # a vector
89
+
90
+ assert A.matvec(x) == B.solvevec(x)
91
+ ```
92
+
93
+ Args:
94
+ operator: `LinearOperator` object. If `operator.is_non_singular == False`,
95
+ an exception is raised. We do allow `operator.is_non_singular == None`,
96
+ in which case this operator will have `is_non_singular == None`.
97
+ Similarly for `is_self_adjoint` and `is_positive_definite`.
98
+ is_non_singular: Expect that this operator is non-singular.
99
+ is_self_adjoint: Expect that this operator is equal to its hermitian
100
+ transpose.
101
+ is_positive_definite: Expect that this operator is positive definite,
102
+ meaning the quadratic form `x^H A x` has positive real part for all
103
+ nonzero `x`. Note that we do not require the operator to be
104
+ self-adjoint to be positive-definite. See:
105
+ https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
106
+ is_square: Expect that this operator acts like square [batch] matrices.
107
+ name: A name for this `LinearOperator`. Default is `operator.name +
108
+ "_inv"`.
109
+
110
+ Raises:
111
+ ValueError: If `operator.is_non_singular` is False.
112
+ """
113
+ parameters = dict(
114
+ operator=operator,
115
+ is_non_singular=is_non_singular,
116
+ is_self_adjoint=is_self_adjoint,
117
+ is_positive_definite=is_positive_definite,
118
+ is_square=is_square,
119
+ name=name
120
+ )
121
+
122
+ self._operator = operator
123
+
124
+ # Auto-set and check hints.
125
+ if operator.is_non_singular is False or is_non_singular is False:
126
+ raise ValueError(
127
+ f"Argument `is_non_singular` or argument `operator` must have "
128
+ f"supplied hint `is_non_singular` equal to `True` or `None`. "
129
+ f"Found `operator.is_non_singular`: {operator.is_non_singular}, "
130
+ f"`is_non_singular`: {is_non_singular}.")
131
+ if operator.is_square is False or is_square is False:
132
+ raise ValueError(
133
+ f"Argument `is_square` or argument `operator` must have supplied "
134
+ f"hint `is_square` equal to `True` or `None`. Found "
135
+ f"`operator.is_square`: {operator.is_square}, "
136
+ f"`is_square`: {is_square}.")
137
+
138
+ # The congruency of is_non_singular and is_self_adjoint was checked in the
139
+ # base operator. Other hints are, in this special case of inversion, ones
140
+ # that must be the same for base/derived operator.
141
+ combine_hint = (
142
+ linear_operator_util.use_operator_or_provided_hint_unless_contradicting)
143
+
144
+ is_square = combine_hint(
145
+ operator, "is_square", is_square,
146
+ "An operator is square if and only if its inverse is square.")
147
+
148
+ is_non_singular = combine_hint(
149
+ operator, "is_non_singular", is_non_singular,
150
+ "An operator is non-singular if and only if its inverse is "
151
+ "non-singular.")
152
+
153
+ is_self_adjoint = combine_hint(
154
+ operator, "is_self_adjoint", is_self_adjoint,
155
+ "An operator is self-adjoint if and only if its inverse is "
156
+ "self-adjoint.")
157
+
158
+ is_positive_definite = combine_hint(
159
+ operator, "is_positive_definite", is_positive_definite,
160
+ "An operator is positive-definite if and only if its inverse is "
161
+ "positive-definite.")
162
+
163
+ # Initialization.
164
+ if name is None:
165
+ name = operator.name + "_inv"
166
+ with ops.name_scope(name):
167
+ super(LinearOperatorInversion, self).__init__(
168
+ dtype=operator.dtype,
169
+ is_non_singular=is_non_singular,
170
+ is_self_adjoint=is_self_adjoint,
171
+ is_positive_definite=is_positive_definite,
172
+ is_square=is_square,
173
+ parameters=parameters,
174
+ name=name)
175
+
176
+ @property
177
+ def operator(self) -> "LinearOperatorInversion":
178
+ """The operator before inversion."""
179
+ return self._operator
180
+
181
+ def _linop_inverse(self) -> linear_operator.LinearOperator:
182
+ return self.operator
183
+
184
+ def _linop_solve(
185
+ self,
186
+ left_operator: "LinearOperatorInversion",
187
+ right_operator: linear_operator.LinearOperator,
188
+ ) -> linear_operator.LinearOperator:
189
+ """Solve inverse of generic `LinearOperator`s."""
190
+ return left_operator.operator.matmul(right_operator)
191
+
192
+ def _assert_non_singular(self):
193
+ return self.operator.assert_non_singular()
194
+
195
+ def _assert_positive_definite(self):
196
+ return self.operator.assert_positive_definite()
197
+
198
+ def _assert_self_adjoint(self):
199
+ return self.operator.assert_self_adjoint()
200
+
201
+ def _shape(self):
202
+ return self.operator.shape
203
+
204
+ def _shape_tensor(self):
205
+ return self.operator.shape_tensor()
206
+
207
+ def _matmul(self, x, adjoint=False, adjoint_arg=False):
208
+ return self.operator.solve(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
209
+
210
+ def _determinant(self):
211
+ return 1. / self.operator.determinant()
212
+
213
+ def _log_abs_determinant(self):
214
+ return -1. * self.operator.log_abs_determinant()
215
+
216
+ def _solve(self, rhs, adjoint=False, adjoint_arg=False):
217
+ return self.operator.matmul(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
218
+
219
+ def _eigvals(self):
220
+ return 1. / self.operator.eigvals()
221
+
222
+ def _cond(self):
223
+ return self.operator.cond()
224
+
225
+ @property
226
+ def _composite_tensor_fields(self):
227
+ return ("operator",)
228
+
229
+ @property
230
+ def _experimental_parameter_ndims_to_matrix_ndims(self):
231
+ return {"operator": 0}