ZTWHHH committed on
Commit
6911a54
·
verified ·
1 Parent(s): 5cd0aad

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +6 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so +3 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/__init__.py +0 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/mlir/__init__.py +0 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/mlir/mlir.py +206 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__init__.py +20 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/__init__.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/jit.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/xla.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__init__.py +0 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__pycache__/xla_sharding.cpython-310.pyc +0 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/xla_sharding.py +587 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/jit.py +156 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/xla.py +620 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__init__.py +0 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/__init__.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses_impl.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/util.cpython-310.pyc +0 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses.py +25 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses_impl.py +1102 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/util.py +263 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__init__.py +171 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_array_ops.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_arrays.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_config.cpython-310.pyc +0 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_dtypes.cpython-310.pyc +0 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_math_ops.cpython-310.pyc +0 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_random.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_utils.cpython-310.pyc +0 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_array_ops.py +2111 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_arrays.py +50 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_config.py +58 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_dtypes.py +208 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_math_ops.py +1642 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_random.py +137 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_utils.py +715 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__init__.py +15 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/__init__.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/control_flow_ops.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/gradients.cpython-310.pyc +0 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/test_util.cpython-310.pyc +0 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/control_flow_ops.py +582 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/gradients.py +144 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/pfor.py +0 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/test_util.py +76 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/__init__.cpython-310.pyc +0 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/dynamic_ragged_shape.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -859,3 +859,9 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summ
859
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so filter=lfs diff=lfs merge=lfs -text
860
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so filter=lfs diff=lfs merge=lfs -text
861
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
859
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so filter=lfs diff=lfs merge=lfs -text
860
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so filter=lfs diff=lfs merge=lfs -text
861
  videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_util_port.so filter=lfs diff=lfs merge=lfs -text
862
+ videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_utils.so filter=lfs diff=lfs merge=lfs -text
863
+ videochat2/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.so filter=lfs diff=lfs merge=lfs -text
864
+ videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.so filter=lfs diff=lfs merge=lfs -text
865
+ videochat2/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.so filter=lfs diff=lfs merge=lfs -text
866
+ videochat2/lib/python3.10/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so filter=lfs diff=lfs merge=lfs -text
867
+ videochat2/lib/python3.10/site-packages/tensorflow/python/util/pywrap_xla_ops.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79873ad1d257eaf536c24d3da41fd2eba8317e77851ddcee4eba412f4068ebfa
3
+ size 5595512
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/mlir/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/mlir/mlir.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # =============================================================================
15
+ """mlir is an experimental library that provides support APIs for MLIR."""
16
+
17
+ from tensorflow.python import pywrap_mlir
18
+ from tensorflow.python.util.tf_export import tf_export
19
+
20
+
21
@tf_export('mlir.experimental.convert_graph_def')
def convert_graph_def(
    graph_def, pass_pipeline='tf-standard-pipeline', show_debug_info=False
):
  """Imports a GraphDef and returns it as a textual MLIR module.

  This API is only intended for inspecting the internals of TensorFlow; the
  returned string is at the moment intended for debugging purposes.

  Args:
    graph_def: A graph_pb2.GraphDef object, or a textual proto representation
      of a valid GraphDef.
    pass_pipeline: A textual description of an MLIR pass pipeline to run on
      the imported module; see the MLIR [textual pass pipeline
      syntax](https://mlir.llvm.org/docs/PassManagement/#textual-pass-pipeline-specification).
    show_debug_info: Whether to include locations in the emitted textual form.

  Returns:
    A textual representation of the MLIR module corresponding to the graphdef.

  Raises:
    InvalidArgumentError: if graph_def is invalid or cannot be converted to
      MLIR.
  """
  # Thin wrapper over the pybind-exported importer; arguments are passed
  # positionally to match the native binding's signature.
  module_text = pywrap_mlir.import_graphdef(
      graph_def, pass_pipeline, show_debug_info)
  return module_text
46
+
47
+
48
@tf_export('mlir.experimental.convert_function')
def convert_function(
    concrete_function,
    pass_pipeline='tf-standard-pipeline',
    show_debug_info=False,
):
  """Imports a ConcreteFunction and returns it as a textual MLIR module.

  This API is only intended for inspecting the internals of TensorFlow; the
  returned string is at the moment intended for debugging purposes.

  A [tf.function](https://www.tensorflow.org/api_docs/python/tf/function) can be
  imported and converted from TensorFlow to TensorFlow MLIR with this API by
  extracting its ConcreteFunction (eagerly-executing wrapper around a
  [tf.Graph](https://www.tensorflow.org/api_docs/python/tf/Graph)).

  For example:
  >>> @tf.function
  ... def add(a, b):
  ...   return a + b

  >>> concrete_function = add.get_concrete_function(
  ...     tf.TensorSpec(None, tf.dtypes.float32),
  ...     tf.TensorSpec(None, tf.dtypes.float32))
  >>> tf.mlir.experimental.convert_function(concrete_function)
  '...module attributes {...} {...}...'

  Args:
    concrete_function: An object of type ConcreteFunction.
    pass_pipeline: A textual description of an MLIR pass pipeline to run on
      the imported module; see the MLIR [textual pass pipeline
      syntax](https://mlir.llvm.org/docs/PassManagement/#textual-pass-pipeline-specification).
    show_debug_info: Whether to include locations in the emitted textual form.

  Returns:
    A textual representation of the MLIR module corresponding to the
    ConcreteFunction.

  Raises:
    InvalidArgumentError: if concrete_function is invalid or cannot be
      converted to MLIR.
  """
  # Delegates to the pybind-exported importer for functions.
  module_text = pywrap_mlir.import_function(
      concrete_function, pass_pipeline, show_debug_info)
  return module_text
93
+
94
+
95
@tf_export('mlir.experimental.convert_saved_model')
def convert_saved_model(
    saved_model_path, exported_names, show_debug_info=False
):
  """Converts a SavedModel to an MLIR module.

  Args:
    saved_model_path: Path to SavedModel.
    exported_names: Names to export.
    show_debug_info: Whether to include locations in the emitted textual form.

  Returns:
    A textual representation of the MLIR module corresponding to the
    SavedModel.
  """
  # Single call into the native conversion entry point.
  module_text = pywrap_mlir.experimental_convert_saved_model_to_mlir(
      saved_model_path, exported_names, show_debug_info)
  return module_text
113
+
114
+
115
@tf_export('mlir.experimental.convert_saved_model_v1')
def convert_saved_model_v1(
    saved_model_path,
    exported_names,
    tags,
    lift_variables,
    include_variables_in_initializers,
    upgrade_legacy=True,
    show_debug_info=False,
):
  """Converts a v1 SavedModel to an MLIR module.

  Args:
    saved_model_path: Path to SavedModel.
    exported_names: Names to export.
    tags: MetaGraphDef to be loaded is identified by the supplied tags.
    lift_variables: Whether to promote tf.VarHandleOp to resource arguments.
    include_variables_in_initializers: Keeps the variables in initializers
      before lifting variables.
    upgrade_legacy: Functionalize the input graph before importing.
    show_debug_info: Whether to include locations in the emitted textual form.

  Returns:
    A textual representation of the MLIR module corresponding to the
    SavedModule.
  """
  # All arguments are forwarded positionally to the native v1 converter.
  module_text = pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
      saved_model_path,
      exported_names,
      tags,
      lift_variables,
      include_variables_in_initializers,
      upgrade_legacy,
      show_debug_info)
  return module_text
150
+
151
+
152
@tf_export('mlir.experimental.run_pass_pipeline')
def run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info=False):
  """Runs an MLIR pass pipeline over the input module.

  Args:
    mlir_txt: Textual representation of the MLIR module.
    pass_pipeline: Pass pipeline to run on module.
    show_debug_info: Whether to include locations in the emitted textual form.

  Returns:
    A textual representation of the MLIR module corresponding to the
    transformed module.
  """
  transformed = pywrap_mlir.experimental_run_pass_pipeline(
      mlir_txt, pass_pipeline, show_debug_info)
  return transformed
168
+
169
+
170
@tf_export('mlir.experimental.write_bytecode')
def experimental_write_bytecode(filename, mlir_txt):
  """Writes an MLIR module out as bytecode.

  Args:
    filename: The filename to write to.
    mlir_txt: The MLIR module in textual format.
  """
  # Side-effect only: the native binding writes the bytecode file; nothing is
  # returned to the caller.
  pywrap_mlir.experimental_write_bytecode(filename, mlir_txt)
179
+
180
+
181
@tf_export('mlir.experimental.tflite_to_tosa_bytecode')
def tflite_to_tosa_bytecode(
    flatbuffer,
    bytecode,
    use_external_constant=False,
    ordered_input_arrays=None,
    ordered_output_arrays=None,
):
  """Converts a TFLite flatbuffer to the TOSA dialect in MLIR bytecode.

  Args:
    flatbuffer: Path to flatbuffer.
    bytecode: Path to output bytecode.
    use_external_constant: Whether to create `tfl.external_const` instead of
      `tfl.const`.
    ordered_input_arrays: Presumably the input arrays to keep, in order; the
      upstream docstring leaves this undocumented — confirm against the native
      binding.
    ordered_output_arrays: If ordered_output_arrays is not empty, then the
      function will only return nodes in ordered_output_arrays in the same
      order.
  """
  # Side-effect only: the native binding writes `bytecode` to disk.
  pywrap_mlir.experimental_tflite_to_tosa_bytecode(
      flatbuffer,
      bytecode,
      use_external_constant,
      ordered_input_arrays,
      ordered_output_arrays)
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """A module for controlling the Tensorflow/XLA JIT compiler."""
16
+
17
+ # pylint: disable=unused-import
18
+ from tensorflow.python.compiler.xla import jit
19
+ from tensorflow.python.compiler.xla import xla
20
+ # pylint: enable=unused-import
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (344 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/jit.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/__pycache__/xla.cpython-310.pyc ADDED
Binary file (18.4 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/__pycache__/xla_sharding.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/experimental/xla_sharding.py ADDED
@@ -0,0 +1,587 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the 'License');
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an 'AS IS' BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ======================================
15
+ """Experimental support for defining XLA shardings."""
16
+
17
+ import numpy as _np # Avoids becoming a part of public Tensorflow API.
18
+
19
+ from tensorflow.compiler.tf2xla.python import xla as tf2xla
20
+ from tensorflow.compiler.xla import xla_data_pb2
21
+ from tensorflow.core.framework import attr_value_pb2
22
+
23
+
24
class Sharding(object):
  """A class to support adding sharding attributes to Ops.

  Use the factory constructors and then call apply_to_tensor:
    Sharding.replicate().apply_to_tensor(tensor)
  """

  def __init__(self, proto=None):
    """Do not use this constructor; use the factory functions below."""
    # `proto` is an xla_data_pb2.OpSharding message (or None).
    self._proto = proto

  @classmethod
  def replicate(cls):
    """Returns a replicated sharding attribute.

    This causes an op to be computed in its entirety independently on all
    cores in the XLA device.
    """
    return Sharding(
        proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))

  @classmethod
  def manual(cls):
    """Returns a manual sharding attribute.

    This means the op is manually partitioned by the user and XLA will not
    change the shapes.
    """
    return Sharding(
        proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MANUAL))

  @classmethod
  def assign_device(cls, core):
    """Returns an AssignDevice sharding attribute.

    This causes an op to be computed in its entirety only on one core in
    the XLA device.

    Args:
      core: The core to assign this Op to.
    """
    # MAXIMAL with a single-element device list pins the op to one core.
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.MAXIMAL,
            tile_assignment_dimensions=[1],
            tile_assignment_devices=[core]))

  @classmethod
  def tile(cls, tile_assignment):
    """Returns a Tiled sharding attribute.

    This causes an op to be partially computed on multiple cores in the
    XLA device.

    Args:
      tile_assignment: An np.ndarray describing the topology of the tiling and
        which device will compute which part of the topology.

    Raises:
      TypeError: tile_assignment was not of np.array type.

    TODO(jmolloy): This concept is nefarious and is not
    something we really want to expose to users (especially as the
    contract for tile_assignment is very strict).
    """
    if not isinstance(tile_assignment, _np.ndarray):
      raise TypeError('Tile assignment must be of type np.ndarray')
    dims = list(tile_assignment.shape)
    # Row-major (C-order) flattening defines the device-to-tile mapping.
    flattened_devices = tile_assignment.reshape(-1, order='C')
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_assignment_dimensions=dims,
            tile_assignment_devices=list(flattened_devices)))

  @classmethod
  def subgroup_tile(cls, tile_assignment, subgroup_modes):
    """Returns a subgroup manual sharding attribute.

    This is similar to tile(), but tile_assignment has one or more dimension
    than the tensor, and subgroup_modes define the sharding types in the last
    dimensions of tile_assignment.

    Args:
      tile_assignment: An np.ndarray describing the topology of the tiling and
        which device will compute which part of the topology.
      subgroup_modes: sharding types for the dimension more than the tensor
        shape rank.

    Raises:
      TypeError: tile_assignment was not of np.array type or subgroup_modes
        has unsupported sharding type.
    """
    if not isinstance(tile_assignment, _np.ndarray):
      raise TypeError('SubgroupTile assignment must be of type np.ndarray')

    if not isinstance(subgroup_modes, list):
      raise TypeError('subgroup_modes in subgroup manual must be of type list')

    if len(tile_assignment.shape) < len(subgroup_modes):
      raise TypeError('SubgroupTile assignment must have rank larger than'
                      ' length of subgroup_modes')

    # Only REPLICATED and MANUAL are valid subgroup modes.
    for sharding_type in subgroup_modes:
      if sharding_type not in [
          xla_data_pb2.OpSharding.REPLICATED, xla_data_pb2.OpSharding.MANUAL
      ]:
        raise TypeError(
            'Each sharding_type in subgroup_modes in subgroup manual must '
            'be of type xla_data_pb2.OpSharding.REPLICATED'
            ' or xla_data_pb2.OpSharding.MANUAL')
    dims = list(tile_assignment.shape)
    flattened_devices = tile_assignment.reshape(-1, order='C')
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_assignment_dimensions=dims,
            tile_assignment_devices=list(flattened_devices),
            last_tile_dims=list(subgroup_modes)))

  @classmethod
  def partial_tile(cls, tile_assignment):
    """Returns a partially tiled sharding attribute.

    This is similar to tile(), but tile_assignment has one more dimension than
    the tensor, and tiles in the last dimension of tile_assignment are
    replicated.

    Args:
      tile_assignment: An np.ndarray describing the topology of the tiling and
        which device will compute which part of the topology.

    Raises:
      TypeError: tile_assignment was not of np.array type.
    """
    if not isinstance(tile_assignment, _np.ndarray):
      raise TypeError('PartialTile assignment must be of type np.ndarray')
    dims = list(tile_assignment.shape)
    flattened_devices = tile_assignment.reshape(-1, order='C')
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_assignment_dimensions=dims,
            tile_assignment_devices=list(flattened_devices),
            replicate_on_last_tile_dim=True))

  @classmethod
  def split(cls, tensor, split_dimension, num_devices, input_shape=None):
    """Returns a Sharding that splits a tensor across a dimension.

    This creates a Tiled attribute, similar to tile(), but easier to use for
    the common case of tiling a tensor N ways in one dimension.

    Args:
      tensor: A tf.Tensor to split.
      split_dimension: The dimension number to split.
      num_devices: The number of cores to split `tensor` over.
      input_shape: The shape of the original tensor.

    Raises:
      ValueError: The tensor to split was smaller in the split dimension than
        the number of devices to split over.
    """
    if input_shape:
      shape = input_shape
    else:
      shape = tensor.shape.as_list()
    # A None (unknown) dimension is allowed through; only a known-too-small
    # dimension is rejected.
    if (shape[split_dimension] is not None and
        shape[split_dimension] < num_devices):
      raise ValueError('Split dimension was smaller than the required number '
                       'of splits: shape=%r, dimension=%r, num_devices=%r' %
                       (shape, split_dimension, num_devices))

    tile_assignment_dims = [1] * len(shape)
    tile_assignment_dims[split_dimension] = num_devices

    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_assignment_dimensions=tile_assignment_dims,
            tile_assignment_devices=range(num_devices)))

  def apply_to_tensor(self,
                      tensor,
                      assign_tuple_sharding=False,
                      use_sharding_op=False,
                      unspecified_dims=None):
    """Applies this Sharding attribute to `tensor`.

    Args:
      tensor: A tf.Tensor to split.
      assign_tuple_sharding: If the sharding type should be a tuple.
      use_sharding_op: Whether to create a sharding op on `tensor`.
      unspecified_dims: An optional list of dimensions unspecified.

    Returns:
      The tensor with Sharding attribute.
    """
    # unspecified_dims is only supported on the sharding-op, non-tuple path.
    if unspecified_dims:
      assert use_sharding_op and not assign_tuple_sharding
    proto = self._proto
    if use_sharding_op:
      if assign_tuple_sharding:
        proto = self._create_tuple_proto(num_outputs=1)
        tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString())
      else:
        tensor = tf2xla.sharding(
            tensor,
            sharding=proto.SerializeToString(),
            unspecified_dims=unspecified_dims or [])
    elif assign_tuple_sharding or len(tensor.op.outputs) > 1:
      proto = self._get_or_create_tuple_proto(tensor.op)
      # We can't mutate an element of old_proto.tuple_shardings, so create
      # a new proto.
      tuple_shardings = list(proto.tuple_shardings)
      tuple_shardings[tensor.value_index] = self._proto
      proto = xla_data_pb2.OpSharding(
          type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)

    # TODO(jmolloy): This need to be seriously revisited before declaring this
    # API available for public use.
    # pylint: disable=protected-access
    tensor.op._set_attr('_XlaSharding',
                        attr_value_pb2.AttrValue(s=proto.SerializeToString()))
    return tensor

  def apply_to_operation(self, operation):
    """Applies this Sharding attribute to `operation`.

    Args:
      operation: A tf.Operation to add sharding annotation.
    """
    attr_value = attr_value_pb2.AttrValue(s=self._proto.SerializeToString())
    # pylint: disable=protected-access
    operation._set_attr('_XlaSharding', attr_value)

  @property
  def proto(self):
    """Return the sharding protobuf of type xla_data_pb2.OpSharding."""
    return self._proto

  def _get_or_create_tuple_proto(self, op):
    # Returns the op's existing '_XlaSharding' attr parsed as an OpSharding,
    # or a fresh all-REPLICATED tuple proto when the attr is absent.
    try:
      attr = op.get_attr('_XlaSharding')
      proto = xla_data_pb2.OpSharding()
      proto.ParseFromString(attr)
      return proto
    except ValueError:
      # get_attr raises ValueError when the attribute is not set.
      return self._create_tuple_proto(len(op.outputs))

  def _create_tuple_proto(self, num_outputs):
    # Builds a TUPLE OpSharding with one REPLICATED entry per output.
    shardings = [
        xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED)
    ] * num_outputs
    return xla_data_pb2.OpSharding(
        type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=shardings)
279
+
280
+
281
def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):
  """Copies a tensor's sharding annotation to another tensor.

  Args:
    from_tensor: Source tensor. Must be the sole output of an op.
    to_tensor: The tensor to annotate with the copy.
    use_sharding_op: Whether to create a sharding op on `to_tensor`.

  Returns:
    A tensor with sharding annotation copied from `from_tensor`.
  """
  sharding = get_tensor_sharding(from_tensor)
  if sharding is None:
    # Source has no sharding annotation: nothing to copy.
    return to_tensor

  if use_sharding_op:
    to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)
  # pylint: disable=protected-access
  to_tensor.op._set_attr(
      '_XlaSharding', attr_value_pb2.AttrValue(s=sharding))
  return to_tensor
302
+
303
+ # Helpers for the above factory functions that allow easy application of
304
+ # shardings, for example:
305
+ # tensor = xla_sharding.replicate(tensor)
306
+
307
+
308
def replicate(tensor, assign_tuple_sharding=False, use_sharding_op=False):
  """Returns `tensor` annotated with a replicated sharding."""
  sharding = Sharding.replicate()
  return sharding.apply_to_tensor(
      tensor,
      assign_tuple_sharding=assign_tuple_sharding,
      use_sharding_op=use_sharding_op)
313
+
314
+
315
def assign_device(tensor,
                  device,
                  assign_tuple_sharding=False,
                  use_sharding_op=False):
  """Returns a tensor that has AssignDevice sharding attribute."""
  sharding = Sharding.assign_device(device)
  return sharding.apply_to_tensor(
      tensor,
      assign_tuple_sharding=assign_tuple_sharding,
      use_sharding_op=use_sharding_op)
324
+
325
+
326
def tile(tensor,
         tile_assignment,
         assign_tuple_sharding=False,
         use_sharding_op=False,
         unspecified_dims=None):
  """Returns a tensor annotated with tiled sharding.

  Args:
    tensor: A tf.Tensor to shard.
    tile_assignment: An np.ndarray describing the topology of the tiling and
      which device will compute which part of the topology.
    assign_tuple_sharding: If the sharding type should be a tuple.
    use_sharding_op: If true, adds a sharding op to set the sharding.
    unspecified_dims: An optional list of dimensions unspecified.
  """
  sharding = Sharding.tile(tile_assignment)
  return sharding.apply_to_tensor(
      tensor,
      assign_tuple_sharding=assign_tuple_sharding,
      use_sharding_op=use_sharding_op,
      unspecified_dims=unspecified_dims or [])
346
+
347
+
348
+ def split(tensor,
349
+ split_dimension,
350
+ num_devices,
351
+ assign_tuple_sharding=False,
352
+ use_sharding_op=False,
353
+ input_shape=None):
354
+ """Returns a tensor that is split along the given dimension.
355
+
356
+ Args:
357
+ tensor: A tf.Tensor to split.
358
+ split_dimension: The dimension to split.
359
+ num_devices: The number of devices to partition the dimension.
360
+ assign_tuple_sharding: If the sharding type should be a tuple.
361
+ use_sharding_op: If true, adds a sharding op to set the sharding.
362
+ input_shape: The full shape of the input tensor.
363
+ """
364
+ return Sharding.split(tensor, split_dimension, num_devices,
365
+ input_shape).apply_to_tensor(
366
+ tensor,
367
+ assign_tuple_sharding=assign_tuple_sharding,
368
+ use_sharding_op=use_sharding_op)
369
+
370
+
371
+ def partial_tile(tensor,
372
+ tile_assignment,
373
+ use_sharding_op=False,
374
+ unspecified_dims=None):
375
+ """Returns a tensor that has tiled sharding.
376
+
377
+ Args:
378
+ tensor: A tf.Tensor to shard.
379
+ tile_assignment: An np.ndarray describing the topology of the tiling and
380
+ which device will compute which part of the topology. It must have one
381
+ more dimension than tensor, and the last dimension represents partially
382
+ replicated tiles.
383
+ use_sharding_op: If true, adds a sharding op to set the sharding.
384
+ unspecified_dims: An optional list of dimensions unspecified.
385
+ """
386
+ return Sharding.partial_tile(tile_assignment).apply_to_tensor(
387
+ tensor,
388
+ use_sharding_op=use_sharding_op,
389
+ unspecified_dims=unspecified_dims or [])
390
+
391
+
392
+ def get_op_sharding(op):
393
+ """Returns sharding attribute of an op.
394
+
395
+ Args:
396
+ op: a TensorFlow op.
397
+
398
+ Returns:
399
+ The attribute representing XLA sharding on this op.
400
+ """
401
+ try:
402
+ return op.get_attr('_XlaSharding')
403
+ except ValueError:
404
+ return None
405
+ except AttributeError:
406
+ # AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
407
+ return None
408
+
409
+
410
+ def get_tensor_sharding(tensor):
411
+ """Returns sharding attribute of a Tensor.
412
+
413
+ Args:
414
+ tensor: a Tensor.
415
+
416
+ Returns:
417
+ The attribute representing XLA sharding on tensor's op.
418
+ """
419
+ try:
420
+ return get_op_sharding(tensor.op)
421
+ except AttributeError:
422
+ # AttributeError: Tensor.op is meaningless when eager execution is enabled.
423
+ return None
424
+
425
+
426
+ def get_sharding_tile_shape(sharding):
427
+ """Returns the tile assignment shape for a sharded Tensor.
428
+
429
+ Args:
430
+ sharding: a serialized OpSharding message describing the layout of a
431
+ sharded Tensor.
432
+
433
+ Returns:
434
+ A list, for each dimension of the sharded Tensor, of the number of shards
435
+ into which it has been split. Returns None if the input indicates no tile
436
+ assignments.
437
+ """
438
+ if sharding is None:
439
+ return None
440
+ sharding_message = xla_data_pb2.OpSharding()
441
+ sharding_message.ParseFromString(sharding)
442
+ if sharding_message.tile_assignment_dimensions:
443
+ return sharding_message.tile_assignment_dimensions
444
+ else:
445
+ return None
446
+
447
+
448
+ def auto_to_manual_spmd_partition(tensor,
449
+ manual_sharding,
450
+ single_dim=-1,
451
+ unspecified_dims=None):
452
+ """Switches from automatic SPMD partitioning to manual partitioning.
453
+
454
+ Converts a full-shaped tensor (to be automatically partitioned by SPMD
455
+ partitioner) to a shard-shaped tensor to be consumed by manually partitioned
456
+ ops.
457
+
458
+ Args:
459
+ tensor: A tf.Tensor in full shape.
460
+ manual_sharding: A serialized string of OpSharding to be used in manual
461
+ partitioning.
462
+ single_dim: If >= 0, the conversion will happen only on this dim in
463
+ subgroups.
464
+ unspecified_dims: An optional list of dimensions unspecified.
465
+
466
+ Returns:
467
+ A shard-shaped tensor to be consumed by manually partitioned ops.
468
+ """
469
+ return tf2xla.spmd_full_to_shard_shape(
470
+ tensor,
471
+ manual_sharding=manual_sharding,
472
+ dim=single_dim,
473
+ unspecified_dims=unspecified_dims or [])
474
+
475
+
476
+ def manual_to_auto_spmd_partition(tensor,
477
+ manual_sharding,
478
+ full_shape,
479
+ single_dim=-1,
480
+ unspecified_dims=None):
481
+ """Switches from manual partitioning to automatic SPMD partitioning.
482
+
483
+ Converts a shard-shaped tensor (manually partitioned in SPMD-style) to a
484
+ full-shaped tensor to be partitioned automatically by the SPMD partitioner.
485
+
486
+ Args:
487
+ tensor: A tf.Tensor in shard shape.
488
+ manual_sharding: a serialized string of OpSharding to be used in manual
489
+ partitioning.
490
+ full_shape: the shape of tensor before partitioning.
491
+ single_dim: If >= 0, the conversion will happen only on this dim in
492
+ subgroups.
493
+ unspecified_dims: An optional list of dimensions unspecified.
494
+
495
+ Returns:
496
+ A full-shaped tensor to be partitioned automatically by the SPMD
497
+ partitioner.
498
+ """
499
+ return tf2xla.spmd_shard_to_full_shape(
500
+ tensor,
501
+ manual_sharding=manual_sharding,
502
+ full_shape=full_shape,
503
+ dim=single_dim,
504
+ unspecified_dims=unspecified_dims or [])
505
+
506
+
507
+ def mesh_split_sharding(device_mesh,
508
+ tensor_split_dims_mapping,
509
+ manual_mesh_dims=None):
510
+ """Returns a Sharding object representing sharding along multiple dimensions.
511
+
512
+ Args:
513
+ device_mesh: An np.ndarray describing the topology of the device mesh and
514
+ each element is the ID of the device in the topology.
515
+ tensor_split_dims_mapping: A list of integers that map each tensor axis to
516
+ the device mesh axis along which it is sharded. Its length is the tensor
517
+ rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
518
+ dimension i. Use -1 for tensor dimensions that are not sharded.
519
+ manual_mesh_dims: An optional list of mesh dims for manual subgroups.
520
+
521
+ Raises:
522
+ ValueError: The number of tensor split dimensions is larger than device mesh
523
+ rank.
524
+ """
525
+ manual_mesh_dims = manual_mesh_dims or []
526
+ permutation = [d for d in tensor_split_dims_mapping if d >= 0
527
+ ] + manual_mesh_dims
528
+ if len(permutation) > len(device_mesh.shape):
529
+ raise ValueError(
530
+ 'Number of tensor split dimensions (%r) is larger than device mesh '
531
+ 'rank (%r). tensor_split_dims_mapping: %r, device_mesh.shape: %r' %
532
+ (len(permutation), len(
533
+ device_mesh.shape), tensor_split_dims_mapping, device_mesh.shape))
534
+ # Append replicated dimensions to the end.
535
+ transpose_permutation = permutation + [
536
+ d for d in range(len(device_mesh.shape)) if d not in permutation
537
+ ]
538
+ tile_assignment = _np.transpose(device_mesh, transpose_permutation)
539
+ tile_shape = [
540
+ 1 if d < 0 else device_mesh.shape[d]
541
+ for d in (tensor_split_dims_mapping + manual_mesh_dims)
542
+ ]
543
+ subgroup_modes = [xla_data_pb2.OpSharding.MANUAL] * len(manual_mesh_dims)
544
+ partial = len(permutation) < len(device_mesh.shape)
545
+ if partial:
546
+ tile_shape.append(_np.prod(device_mesh.shape) // _np.prod(tile_shape))
547
+ subgroup_modes.append(xla_data_pb2.OpSharding.REPLICATED)
548
+ tile_assignment = _np.reshape(tile_assignment, tile_shape)
549
+
550
+ if manual_mesh_dims:
551
+ return Sharding.subgroup_tile(tile_assignment, subgroup_modes)
552
+
553
+ if partial:
554
+ return Sharding.partial_tile(tile_assignment)
555
+ return Sharding.tile(tile_assignment)
556
+
557
+
558
+ def mesh_split(tensor,
559
+ device_mesh,
560
+ tensor_split_dims_mapping,
561
+ use_sharding_op=False,
562
+ manual_mesh_dims=None,
563
+ unspecified_dims=None):
564
+ """Returns a tensor that is split along multiple dimensions in a device mesh.
565
+
566
+ Args:
567
+ tensor: A tf.Tensor to split.
568
+ device_mesh: An np.ndarray describing the topology of the device mesh and
569
+ each element is the ID of the device in the topology.
570
+ tensor_split_dims_mapping: A list of integers that map each tensor axis to
571
+ the device mesh axis along which it is sharded. Its length is the tensor
572
+ rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
573
+ dimension i. Use -1 for tensor dimensions that are not sharded.
574
+ use_sharding_op: If true, adds a sharding op to set the sharding.
575
+ manual_mesh_dims: An optional list of mesh dims for manual subgroups.
576
+ unspecified_dims: An optional list of dimensions unspecified.
577
+
578
+ Raises:
579
+ ValueError: The number of tensor split dimensions is larger than device mesh
580
+ rank.
581
+ """
582
+ sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping,
583
+ manual_mesh_dims)
584
+ return sharding.apply_to_tensor(
585
+ tensor,
586
+ use_sharding_op=use_sharding_op,
587
+ unspecified_dims=unspecified_dims or [])
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/jit.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Library for controlling the Tensorflow/XLA JIT compiler."""
16
+
17
+ import contextlib
18
+
19
+ from tensorflow.core.framework import attr_value_pb2
20
+ from tensorflow.python.eager import context
21
+ from tensorflow.python.framework import ops
22
+ from tensorflow.python.util.tf_export import tf_export
23
+
24
+
25
+ _XLA_SCOPE_KEY = ("__xla_scope",)
26
+
27
+
28
+ class _XlaScope(object):
29
+ """Keeps track of previous XLA scope calls, and depth of current call."""
30
+
31
+ def __init__(self, count, depth):
32
+ self.count = count
33
+ self.depth = depth
34
+
35
+
36
+ @contextlib.contextmanager
37
+ @tf_export("xla.experimental.jit_scope")
38
+ def experimental_jit_scope(compile_ops=True, separate_compiled_gradients=False):
39
+ """Enable or disable JIT compilation of operators within the scope.
40
+
41
+ NOTE: This is an experimental feature.
42
+
43
+ The compilation is a hint and only supported on a best-effort basis.
44
+
45
+ Example usage:
46
+
47
+ ```python
48
+ with tf.xla.experimental.jit_scope():
49
+ c = tf.matmul(a, b) # compiled
50
+ with tf.xla.experimental.jit_scope(compile_ops=False):
51
+ d = tf.matmul(a, c) # not compiled
52
+ with tf.xla.experimental.jit_scope(
53
+ compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
54
+ e = tf.matmul(a, b) + d # matmul is compiled, the addition is not.
55
+ ```
56
+
57
+ Example of `separate_compiled_gradients`:
58
+
59
+ ```python
60
+ # In the example below, the computations for f, g and h will all be compiled
61
+ # in separate scopes.
62
+ with tf.xla.experimental.jit_scope(
63
+ separate_compiled_gradients=True):
64
+ f = tf.matmul(a, b)
65
+ g = tf.gradients([f], [a, b], name='mygrads1')
66
+ h = tf.gradients([f], [a, b], name='mygrads2')
67
+ ```
68
+
69
+ Ops that are not in the scope may be clustered and compiled with ops in
70
+ the scope with `compile_ops=True`, while the ops in the scope with
71
+ `compile_ops=False` will never be compiled.
72
+
73
+ For example:
74
+
75
+ ```python
76
+ # In the example below, x and loss may be clustered and compiled together,
77
+ # while y will not be compiled.
78
+ with tf.xla.experimental.jit_scope():
79
+ x = tf.matmul(a, b)
80
+ with tf.xla.experimental.jit_scope(compile_ops=False):
81
+ y = tf.matmul(c, d)
82
+ loss = x + y
83
+ ```
84
+
85
+ If you want to only compile the ops in the scope with `compile_ops=True`,
86
+ consider adding an outer `jit_scope(compile_ops=False)`:
87
+
88
+ ```python
89
+ # In the example below, only x will be compiled.
90
+ with tf.xla.experimental.jit_scope(compile_ops=False):
91
+ with tf.xla.experimental.jit_scope():
92
+ x = tf.matmul(a, b)
93
+ y = tf.matmul(c, d)
94
+ loss = x + y
95
+ ```
96
+
97
+ Args:
98
+ compile_ops: Whether to enable or disable compilation in the scope.
99
+ Either a Python bool, or a callable that accepts the parameter
100
+ `node_def` and returns a python bool.
101
+ separate_compiled_gradients: If true put each gradient subgraph into a
102
+ separate compilation scope. This gives fine-grained control over which
103
+ portions of the graph will be compiled as a single unit. Compiling
104
+ gradients separately may yield better performance for some graphs.
105
+ The scope is named based on the scope of the forward computation as well
106
+ as the name of the gradients. As a result, the gradients will be compiled
107
+ in a scope that is separate from both the forward computation, and from
108
+ other gradients.
109
+ Raises:
110
+ RuntimeError: if called when eager execution is enabled.
111
+ Yields:
112
+ The current scope, enabling or disabling compilation.
113
+ """
114
+ if context.executing_eagerly():
115
+ raise RuntimeError("xla.experimental.jit_scope is not supported when eager "
116
+ "execution is enabled. Try use it inside tf.function.")
117
+
118
+ if callable(compile_ops):
119
+ def xla_compile(node_def):
120
+ return attr_value_pb2.AttrValue(b=compile_ops(node_def))
121
+ else:
122
+ xla_compile = attr_value_pb2.AttrValue(b=compile_ops)
123
+
124
+ attrs = {
125
+ "_XlaCompile":
126
+ xla_compile,
127
+ "_XlaSeparateCompiledGradients":
128
+ attr_value_pb2.AttrValue(b=bool(separate_compiled_gradients))
129
+ }
130
+
131
+ # Find the singleton counter for the current scoped graph. If it
132
+ # doesn't exist, create one.
133
+ xla_scope_counter = ops.get_collection(_XLA_SCOPE_KEY)
134
+ if not xla_scope_counter:
135
+ xla_scope_counter = _XlaScope(0, 0)
136
+ ops.add_to_collection(_XLA_SCOPE_KEY, xla_scope_counter)
137
+ else:
138
+ xla_scope_counter = xla_scope_counter[0]
139
+
140
+ if xla_scope_counter.depth == 0:
141
+ # If we're at the root xla scope, we can increase the counter so
142
+ # future calls to jit_scope use a different scope value.
143
+ # If we're already within a scope, we'll be fusing using the scope
144
+ # controlled by the parent.
145
+ attrs["_XlaScope"] = attr_value_pb2.AttrValue(
146
+ s=("jit_scope_%d" % xla_scope_counter.count).encode())
147
+ xla_scope_counter.count += 1
148
+
149
+ xla_scope_counter.depth += 1
150
+
151
+ # pylint: disable=protected-access
152
+ with ops.get_default_graph()._attr_scope(attrs):
153
+ yield
154
+ # pylint: enable=protected-access
155
+
156
+ xla_scope_counter.depth -= 1
videochat2/lib/python3.10/site-packages/tensorflow/python/compiler/xla/xla.py ADDED
@@ -0,0 +1,620 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # =============================================================================
15
+ """xla is an experimental library that provides XLA support APIs."""
16
+
17
+ import contextlib
18
+
19
+
20
+ from tensorflow.compiler.jit.ops import xla_ops
21
+ from tensorflow.compiler.jit.ops import xla_ops_grad # pylint: disable=unused-import
22
+ from tensorflow.core.framework import attr_value_pb2
23
+ from tensorflow.python.distribute import summary_op_util
24
+ from tensorflow.python.eager import context
25
+ from tensorflow.python.eager import def_function
26
+ from tensorflow.python.framework import ops
27
+ from tensorflow.python.ops import array_ops
28
+ from tensorflow.python.ops import control_flow_ops
29
+ from tensorflow.python.ops import variable_scope
30
+ from tensorflow.python.platform import tf_logging as logging
31
+ from tensorflow.python.util import compat
32
+ from tensorflow.python.util import nest
33
+ from tensorflow.python.util import tf_inspect
34
+ from tensorflow.python.util.compat import collections_abc
35
+ from tensorflow.python.util.deprecation import deprecated
36
+ from tensorflow.python.util.tf_export import tf_export
37
+
38
+ _XLA_COMPILE_ATTR = '_xla_compile_id'
39
+ _MAX_WARNING_LINES = 5
40
+
41
+ # Operations that indicate some error in the users graph. For example, XLA
42
+ # computation should not have any Placeholder op.
43
+ _DENYLISTED_OPS = set([
44
+ 'Placeholder',
45
+ ])
46
+
47
+ # XLA doesn't currently support reading of intermediate tensors, thus some ops
48
+ # are not supported.
49
+ _UNSUPPORTED_OPS = set([
50
+ 'AudioSummary',
51
+ 'AudioSummaryV2',
52
+ 'HistogramSummary',
53
+ 'ImageSummary',
54
+ 'MergeSummary',
55
+ 'Print',
56
+ 'ScalarSummary',
57
+ 'TensorSummary',
58
+ 'TensorSummaryV2',
59
+ ])
60
+
61
+
62
+ @tf_export('xla.experimental.compile')
63
+ @deprecated(
64
+ None, 'xla.experimental.compile is deprecated. Consider using '
65
+ '`@tf.function(jit_compile=True)`.',
66
+ warn_once=True)
67
+ def compile(computation, inputs=None): # pylint: disable=redefined-builtin
68
+ """Builds an operator that compiles and runs `computation` with XLA.
69
+
70
+ NOTE: In eager mode, `computation` will have `@tf.function` semantics.
71
+
72
+ Args:
73
+ computation: A Python function that builds a computation to apply to the
74
+ input. If the function takes n inputs, 'inputs' should be a list of n
75
+ `Tensor`s.
76
+
77
+ `computation` may return a list of `Tensor`s and `Operation`s.
78
+ `Tensor`s must come before `Operation`s in the returned list.
79
+
80
+ All `Operation`s returned from `computation` will be executed when
81
+ evaluating any of the returned output tensors.
82
+ inputs: A list of inputs or `None` (equivalent to an empty list). Each input
83
+ can be a nested structure containing values that can be converted to
84
+ `Tensor`s. Note that passing an N-dimension list of compatible values will
85
+ result in an N-dimension list of scalar `Tensor`s rather than a single
86
+ Rank-N `Tensor`. If you need a different behavior, convert parts of
87
+ `inputs` to `Tensor`s with `tf.convert_to_tensor`.
88
+
89
+ Returns:
90
+ List of `Tensor`s corresponding to the `Tensor`s from
91
+ the output of `computation` i.e. the same return value as if
92
+ computation(*inputs) is called directly, with the following exceptions:
93
+ * None output: a NoOp would be returned with a control dependency on
94
+ `computation`.
95
+ * Single value output: a tuple containing the value would be returned.
96
+ * Operation-only outputs: a NoOp would be returned with a control
97
+ dependency on `computation`.
98
+ TODO(b/121383831): Investigate into removing these special cases.
99
+
100
+ Raises:
101
+ RuntimeError: When eager execution is enabled.
102
+
103
+ Known issues:
104
+ When a tf.random operation is built with XLA, the implementation doesn't
105
+ pass the user provided seed to the XLA compiler. As such, the XLA compiler
106
+ generates a random number and uses it as a seed when compiling the
107
+ operation. This implementation causes a violation of the Tensorflow
108
+ defined semantics in two aspects. First, changing the value of the user
109
+ defined seed doesn't change the numbers generated by the operation.
110
+ Second, when a seed is not specified, running the program multiple times
111
+ will generate the same numbers.
112
+ """
113
+ if context.executing_eagerly():
114
+
115
+ @def_function.function
116
+ def xla_compile_wrapper():
117
+ return _compile_internal(computation, inputs)
118
+
119
+ return xla_compile_wrapper()
120
+
121
+ return _compile_internal(computation, inputs)
122
+
123
+
124
+ class XLACompileContext(control_flow_ops.XLAControlFlowContext):
125
+ """A `ControlFlowContext` for nodes inside an XLA computation cluster.
126
+
127
+ THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NO USE DIRECTLY.
128
+
129
+ The primary role of `XLACompileContext` is to mark operators inside a
130
+ xla.compile() computation with attribute "_xla_compile_id=XYZ", where XYZ is
131
+ a unique name.
132
+
133
+ `ControlFlowContext` is used to perform the annotation since it integrates
134
+ with Tensorflow constructs like ResourceVariables. For example, if a
135
+ `ResourceVariable` is constructed inside a xla.compile() block, the
136
+ `ResourceVariable` implementation can use
137
+ `with ops.control_dependencies(None)` to build the variable's definition
138
+ outside the compiled computation.
139
+ """
140
+
141
+ def __init__(self, name, pivot):
142
+ """Builds a new XLACompileContext.
143
+
144
+ Args:
145
+ name: a unique name for the context, used to populate the
146
+ `_xla_compile_id` attribute.
147
+ pivot: a pivot node. Nodes in the XLACompileContext that do not have any
148
+ inputs will have a control dependency on the pivot node. This ensures
149
+ that nodes are correctly included in any enclosing control flow
150
+ contexts.
151
+ """
152
+ super(XLACompileContext, self).__init__()
153
+ self._name = name
154
+ self._name_as_bytes = compat.as_bytes(name)
155
+ self._unsupported_ops = []
156
+ self._pivot = pivot
157
+
158
+ def report_unsupported_operations(self):
159
+ if self._unsupported_ops:
160
+ op_str = '\n'.join([
161
+ ' %s (%s)' % (op.type, op.name)
162
+ for op in self._unsupported_ops[:_MAX_WARNING_LINES]
163
+ ])
164
+ logging.warning('%d unsupported operations found: \n%s',
165
+ len(self._unsupported_ops), op_str)
166
+ if len(self._unsupported_ops) > _MAX_WARNING_LINES:
167
+ logging.warning('... and %d more',
168
+ len(self._unsupported_ops) - _MAX_WARNING_LINES)
169
+
170
+ def _RemoveExternalControlEdges(self, op: ops.Operation):
171
+ """Remove any external control dependency on this op."""
172
+ internal_control_inputs = []
173
+ external_control_inputs = []
174
+ for x in op.control_inputs:
175
+ # pylint: disable=protected-access
176
+ is_internal_op = False
177
+ ctxt = x._get_control_flow_context()
178
+ while ctxt is not None:
179
+ if ctxt == self:
180
+ is_internal_op = True
181
+ break
182
+ ctxt = ctxt._outer_context
183
+ if is_internal_op:
184
+ internal_control_inputs.append(x)
185
+ else:
186
+ external_control_inputs.append(x)
187
+ # pylint: enable=protected-access
188
+ # pylint: disable=protected-access
189
+ op._remove_all_control_inputs()
190
+ op._add_control_inputs(internal_control_inputs)
191
+ # pylint: enable=protected-access
192
+ return internal_control_inputs, external_control_inputs
193
+
194
+ def AddOp(self, op: ops.Operation):
195
+ """Create op in XLACompileContext and notifies outer context recursively."""
196
+ # pylint: disable=protected-access
197
+ if op.type in _DENYLISTED_OPS:
198
+ logging.error(
199
+ 'Operation of type %s (%s) is not supported in XLA. Execution will '
200
+ 'fail if this op is used in the graph. ', op.type, op.name)
201
+
202
+ # TODO(ycao): Automatically disable summaries instead of reporting them.
203
+ if op.type in _UNSUPPORTED_OPS:
204
+ self._unsupported_ops.append(op)
205
+
206
+ if any(x.dtype._is_ref_dtype for x in op.inputs):
207
+ raise NotImplementedError(
208
+ 'Non-resource Variables are not supported inside XLA computations '
209
+ '(operator name: %s)' % op.name)
210
+
211
+ if _XLA_COMPILE_ATTR in op.node_def.attr:
212
+ raise ValueError('XLA compiled computations cannot be nested, (operator '
213
+ 'name: %s)' % op.name)
214
+
215
+ op._set_attr(
216
+ _XLA_COMPILE_ATTR, attr_value_pb2.AttrValue(s=self._name_as_bytes))
217
+
218
+ op.graph.prevent_feeding(op)
219
+ op.graph.prevent_fetching(op)
220
+
221
+ # Remove any control edges from outer control flow contexts. These may cause
222
+ # mismatched frame errors. An example is when one of op's inputs is
223
+ # generated in a different While control flow context.
224
+ (internal_control_inputs,
225
+ external_control_inputs) = self._RemoveExternalControlEdges(op)
226
+
227
+ if not op.inputs:
228
+ # Add a control edge from the control pivot to this op.
229
+ if not internal_control_inputs:
230
+ # pylint: disable=protected-access
231
+ op._add_control_input(self._pivot)
232
+ # pylint: enable=protected-access
233
+ else:
234
+ for index in range(len(op.inputs)):
235
+ x = op.inputs[index]
236
+ real_x = self.AddValue(x)
237
+ if real_x is not x:
238
+ op._update_input(index, real_x) # pylint: disable=protected-access
239
+
240
+ if external_control_inputs:
241
+ # Use an identity to pull control inputs as data inputs. Note that we
242
+ # ignore ops which don't have outputs. TODO(phawkins): fix that.
243
+ with ops.control_dependencies(None):
244
+ self.Enter()
245
+ external_control_inputs = [
246
+ array_ops.identity(x.outputs[0]).op
247
+ for x in external_control_inputs
248
+ if x.outputs
249
+ ]
250
+ self.Exit()
251
+ # pylint: disable=protected-access
252
+ op._add_control_inputs(external_control_inputs)
253
+ # pylint: enable=protected-access
254
+
255
+ # Mark op's outputs as seen by this context and any outer contexts.
256
+ output_names = [x.name for x in op.outputs]
257
+ context = self
258
+ while context is not None:
259
+ # pylint: disable=protected-access
260
+ context._values.update(output_names)
261
+ context = context._outer_context
262
+ # pylint: enable=protected-access
263
+
264
+ if self._outer_context:
265
+ self._outer_context.AddInnerOp(op)
266
+
267
+ def AddValue(self, val):
268
+ """Add `val` to the current context and its outer context recursively."""
269
+ if val.name in self._values:
270
+ # Use the real value if it comes from outer context.
271
+ result = self._external_values.get(val.name)
272
+ return val if result is None else result
273
+
274
+ result = val
275
+ self._values.add(val.name)
276
+ if self._outer_context:
277
+ result = self._outer_context.AddValue(val)
278
+ self._values.add(result.name)
279
+
280
+ self._external_values[val.name] = result
281
+
282
+ return result
283
+
284
+ def AddInnerOp(self, op: ops.Operation):
285
+ self.AddOp(op)
286
+ if self._outer_context:
287
+ self._outer_context.AddInnerOp(op)
288
+
289
+ @property
290
+ def grad_state(self):
291
+ # Define the gradient loop state associated with the XLACompileContext to
292
+ # be None as the XLACompileContext does not get nested nor does the
293
+ # grad_state outside the XLACompileContext affect the graph inside so the
294
+ # grad_state should be as if this is the top-level gradient state.
295
+ return None
296
+
297
+ @property
298
+ def back_prop(self):
299
+ """Forwards to the enclosing while context, if any."""
300
+ if self.GetWhileContext():
301
+ return self.GetWhileContext().back_prop
302
+ return False
303
+
304
+
305
+ def _compile_internal(computation, inputs=None):
306
+ """Builds graph operators that compiles and symbolically executes computation.
307
+
308
+ Args:
309
+ computation: A Python function that builds the computation to compile and
310
+ execute.
311
+ inputs: A list of inputs or `None` (equivalent to an empty list). Each input
312
+ can be a nested structure containing values that are convertible to
313
+ tensors. Note that passing an N-dimension list of compatible values will
314
+ result in a N-dimension list of scalar tensors rather than a single Rank-N
315
+ tensors. If you need different behavior, convert part of inputs to tensors
316
+ with `tf.convert_to_tensor`.
317
+
318
+ Returns:
319
+ Same data structure as if computation(*inputs) is called directly with some
320
+ exceptions for correctness. Exceptions include: 1) None output 2) Single
321
+ value output 3) Operation-only outputs
322
+ Raises:
323
+ ValueError: If any element in computation outputs is neither an operations
324
+ or a value that can be converted to tensor.
325
+ ValueError: If computation outputs is non-flat and contains any Operations.
326
+ TypeError: If `inputs` is not a list or tuple.
327
+ """
328
+ if inputs is None:
329
+ inputs = []
330
+
331
+ if not isinstance(inputs, collections_abc.Sequence):
332
+ raise TypeError('inputs must be a list')
333
+
334
+ # Flatten inputs.
335
+ flat_inputs = nest.flatten(inputs)
336
+ # Converts inputs to Tensors.
337
+ flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]
338
+
339
+ cluster_name = ops.get_default_graph().unique_name('cluster')
340
+ pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')
341
+ context = XLACompileContext(name=cluster_name, pivot=pivot)
342
+ try:
343
+ context.Enter()
344
+
345
+ # Add identity ops so even unused inputs are 'consumed' by the
346
+ # computation.
347
+ flat_inputs = [
348
+ array_ops.identity(x, name='input_{}'.format(i))
349
+ for i, x in enumerate(flat_inputs)
350
+ ]
351
+
352
+ # Re-pack flat_inputs in same structure as 'inputs'.
353
+ computation_inputs = nest.pack_sequence_as(
354
+ structure=inputs, flat_sequence=flat_inputs)
355
+
356
+ # Only resource variables work inside an XLA computation, so turn on
357
+ # resource variables for the computation.
358
+ vscope = variable_scope.get_variable_scope()
359
+ saved_use_resource = vscope.use_resource
360
+ vscope.set_use_resource(True)
361
+
362
+ with _disable_summary_context():
363
+ outputs = computation(*computation_inputs)
364
+
365
+ # Restore variable scope after computation.
366
+ vscope.set_use_resource(saved_use_resource)
367
+
368
+ outputs_is_flat = is_flat(outputs)
369
+ if outputs_is_flat:
370
+ output_tensors, control_deps = _postprocess_flat_outputs(outputs)
371
+ else:
372
+ output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
373
+
374
+ context.ExitResult(output_tensors)
375
+ finally:
376
+ context.report_unsupported_operations()
377
+ context.Exit()
378
+
379
+ # When XLA computation returns only operations and no tensors, a NoOp
380
+ # dependent on the operations in outputs is returned. Otherwise final
381
+ # outputs would be empty and there is no way to trigger returned
382
+ # operations.
383
+ if not output_tensors:
384
+ return control_flow_ops.group(control_deps, name='output_0')
385
+
386
+ output_tensors = [
387
+ xla_ops.xla_cluster_output(o, name='output{}'.format(i))
388
+ for i, o in enumerate(output_tensors)
389
+ ]
390
+
391
+ with ops.control_dependencies(control_deps):
392
+ # Wraps the outputs in identity operators that carries control
393
+ # dependencies.
394
+ output_tensors = [
395
+ array_ops.identity(o, name='output_%d' % i)
396
+ for i, o in enumerate(output_tensors)
397
+ ]
398
+
399
+ # If `computation` returned non-flat output structure, pack output tensors
400
+ # back into same structure.
401
+ if not outputs_is_flat:
402
+ output_tensors = nest.pack_sequence_as(
403
+ structure=outputs, flat_sequence=output_tensors)
404
+
405
+ return output_tensors
406
+
407
+
408
def is_flat(outputs):
  """Returns True if `outputs` has no nested structure.

  Flat values are: `None`, a single object, or a list/tuple whose elements
  are themselves unstructured (not sequences, mappings, or attrs classes).

  Only sequences, dictionaries and attrs-defined types count as structure
  here; a single user-defined object is treated as flat, and any conversion
  errors for it surface later.

  Args:
    outputs: Output from `computation` inside `xla.compile`.

  Returns:
    A boolean indicates whether outputs is flat.
  """

  def _is_structured(value):
    # A value is "structure" if nest would recurse into it: sequences,
    # mappings, or classes generated by the attrs library.
    return (isinstance(value,
                       (collections_abc.Sequence, collections_abc.Mapping)) or
            hasattr(value.__class__, '__attrs_attrs__'))

  # A sequence is non-flat as soon as any element carries structure.
  if isinstance(outputs, collections_abc.Sequence):
    if any(_is_structured(o) for o in outputs):
      return False

  # A mapping at the top level is always non-flat.
  if isinstance(outputs, collections_abc.Mapping):
    return False

  # Likewise an attrs-defined object.
  return not hasattr(outputs.__class__, '__attrs_attrs__')
448
+
449
+
450
def _postprocess_flat_outputs(outputs):
  """Validates flat outputs and adds back device assignments.

  Args:
    outputs: Output from `computation` inside `xla.compile`.

  Returns:
    Tensors and Operations extracted from outputs.

  Raises:
    ValueError: If an element of `outputs` is neither an Operation nor
      convertible to a Tensor, or if Tensors do not all precede Operations.
  """
  # Following code segment is to preserve legacy behavior. Previously we only
  # supported flat outputs and thus for consistency it was nice to convert even
  # single element into a tuple. But now that we support arbitrary output
  # structure, this is no longer necessary.
  # TODO(b/121383831): Migrate all legacy use cases and delete this special
  # case.
  # If the computation returns `None`, make it an empty tuple.
  if outputs is None:
    outputs = tuple()
  # If the computation only returned one value, make it a tuple.
  if not isinstance(outputs, collections_abc.Sequence):
    outputs = (outputs,)

  # Append `no_op` here so that return value of this function always contains
  # at least one op that can trigger XlaLaunch node.
  outputs += (control_flow_ops.no_op(),)
  try:
    # Operations pass through untouched; everything else must be convertible
    # to a Tensor.
    outputs = [
        o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
        for o in outputs
    ]
  except Exception as e:
    raise ValueError(
        'XLA computation function return values must all either be Operations'
        ' or convertible to Tensors. Got error: "%s"' % str(e))

  # Separates the returned Operations and Tensors.
  output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
  output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]

  # The two partitions concatenated must reproduce the original order, which
  # holds only when all Tensors precede all Operations.
  if outputs != output_tensors + output_operations:
    raise ValueError(
        'XLA computation function must return zero or more Tensor values '
        'followed by zero or more Operations.')

  # Re-wrap each output Tensor in an Identity on its original device so that
  # device placement survives being routed through the XLA cluster.
  new_output_tensors = []
  for t in output_tensors:
    with ops.device(t.device if t.device else ''):
      new_output_tensors.append(array_ops.identity(t))

  return new_output_tensors, output_operations
500
+
501
+
502
def _postprocess_non_flat_outputs(outputs):
  """Validates non-flat outputs and adds back device assignments.

  Args:
    outputs: Output from `computation` inside `xla.compile`.

  Returns:
    Tensors extracted from outputs and an empty list because Operations are not
    allowed in non-flat outputs.

  Raises:
    ValueError: If any leaf of `outputs` is an Operation, or cannot be
      converted to a Tensor.
  """
  # Convert all non-Operation outputs to Tensors.
  new_output_tensors = []
  for o in nest.flatten(outputs):
    # Operations cannot be re-packed into an arbitrary structure, so they are
    # rejected outright in the non-flat case.
    if isinstance(o, ops.Operation):
      raise ValueError(
          'xla.compile does not support Operation as return value in non-flat '
          'output structure. You can set returned Operations as control '
          'dependencies of returned Tensors so Operations are triggered when '
          'Tensors are evaluated. Operation found: "%s"' % o.name)

    try:
      o = ops.convert_to_tensor(o)
    except Exception as e:
      raise ValueError(
          'XLA computation function return values must all either be '
          'Operations or convertible to Tensors. Got error: "%s"' % str(e))

    # Makes sure even pass-through inputs/outputs are touched in compile
    # context by creating an Identity node inside compile context.
    with ops.device(o.device if o.device else ''):
      new_output_tensors.append(array_ops.identity(o))

  return new_output_tensors, []
535
+
536
+
537
@contextlib.contextmanager
def _disable_summary_context():
  """Enters a context where all summary ops are skipped.

  Summaries are not yet supported in xla.compile(). So we provide this context
  manager that can skip creating summary ops. This is a temporary workaround
  due to XLA not supporting summary ops.

  Yields:
    None.
  """
  # Monkey-patch skip_summary to unconditionally report True, restoring the
  # saved function on exit even if the body raises.
  saved_skip_summary = summary_op_util.skip_summary
  summary_op_util.skip_summary = lambda: True
  try:
    yield
  finally:
    summary_op_util.skip_summary = saved_skip_summary
555
+
556
+
557
class _CapturedObject(object):
  """A placeholder that captures a single object for later retrieval."""

  def __init__(self):
    # Sentinel: None means "nothing captured yet"; consequently None itself
    # cannot be captured (it would be indistinguishable from empty).
    self._object = None

  def capture(self, o):
    """Stores `o`; raises RuntimeError if something was already captured."""
    # Compare against None by identity (rather than truthiness) so falsy
    # objects such as 0, '' or False still arm the capture-once guard.
    if self._object is not None:
      raise RuntimeError(
          'InternalError: _CapturedObject can capture only once. Please file '
          'bug.')

    self._object = o

  def get(self):
    """Returns the captured object, or None if nothing was captured."""
    return self._object
573
+
574
+
575
def check_function_argument_count(func, input_arity, infeed_queue):
  """Validate the number of input arguments to an XLA function.

  Args:
    func: the Python function that will be called to generate the body of an
      XLA computation graph.
    input_arity: the number of explicit arguments supplied by the caller.
    infeed_queue: if not None, the infeed queue that will supply additional
      arguments to the function.

  Returns:
    None if function can be called with the supplied number of arguments, or
    an error string if it cannot.
  """

  def _describe(bound, count):
    # E.g. "exactly 1 argument", "at least 2 arguments".
    plural = '' if count == 1 else 's'
    return '%s %d argument%s' % (bound, count, plural)

  supplied = input_arity
  if infeed_queue is not None:
    supplied += infeed_queue.number_of_tuple_elements

  arg_spec = tf_inspect.getargspec(func)
  total_args = len(arg_spec.args)
  num_defaults = 0 if arg_spec.defaults is None else len(arg_spec.defaults)
  required_args = total_args - num_defaults

  if supplied < required_args:
    # Too few arguments to call the function at all.
    if num_defaults == 0 and arg_spec.varargs is None:
      return _describe('exactly', total_args)
    return _describe('at least', required_args)

  if arg_spec.varargs is None and supplied > total_args:
    # Too many arguments and no *args to absorb the excess.
    if num_defaults == 0:
      return _describe('exactly', total_args)
    return _describe('at most', total_args)

  # Either *args absorbs any surplus, or the supplied count falls inside the
  # [required_args, total_args] window.
  return None
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses.cpython-310.pyc ADDED
Binary file (460 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses_impl.cpython-310.pyc ADDED
Binary file (40.3 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/util.cpython-310.pyc ADDED
Binary file (8.3 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Loss operations for use in neural networks.
16
+
17
+ Note: All the losses are added to the `GraphKeys.LOSSES` collection by default.
18
+
19
+ API docstring: tensorflow.losses
20
+ """
21
+
22
+ # pylint: disable=wildcard-import
23
+ from tensorflow.python.ops.losses.losses_impl import *
24
+ from tensorflow.python.ops.losses.util import *
25
+ # pylint: enable=wildcard-import
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses_impl.py ADDED
@@ -0,0 +1,1102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Implementation of Loss operations for use in neural networks."""
16
+
17
+ from tensorflow.python.eager import context
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.ops import array_ops
21
+ from tensorflow.python.ops import cond
22
+ from tensorflow.python.ops import confusion_matrix
23
+ from tensorflow.python.ops import control_flow_ops
24
+ from tensorflow.python.ops import math_ops
25
+ from tensorflow.python.ops import nn
26
+ from tensorflow.python.ops import nn_ops
27
+ from tensorflow.python.ops import weights_broadcast_ops
28
+ from tensorflow.python.ops.losses import util
29
+ from tensorflow.python.util import dispatch
30
+ from tensorflow.python.util.deprecation import deprecated_args
31
+ from tensorflow.python.util.deprecation import deprecated_argument_lookup
32
+ from tensorflow.python.util.tf_export import tf_export
33
+
34
+
35
@tf_export(v1=["losses.Reduction"])
class Reduction:
  """Types of loss reduction.

  Contains the following values:

  * `NONE`: Un-reduced weighted losses with the same shape as input.
  * `SUM`: Scalar sum of weighted losses.
  * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED.
  * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
  * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero
     weights. DEPRECATED.
  * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED.
  """

  NONE = "none"
  SUM = "weighted_sum"
  SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size"
  MEAN = "weighted_mean"
  SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"
  # Alias kept for backwards compatibility.
  SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS

  @classmethod
  def all(cls):
    """Returns the tuple of every supported reduction key."""
    return (
        cls.NONE,
        cls.SUM,
        cls.MEAN,
        cls.SUM_OVER_BATCH_SIZE,
        cls.SUM_OVER_NONZERO_WEIGHTS,
        cls.SUM_BY_NONZERO_WEIGHTS)

  @classmethod
  def validate(cls, key):
    """Raises ValueError when `key` is not a supported reduction."""
    if key not in cls.all():
      raise ValueError(f"Invalid Reduction Key {key}. Key should be one of "
                       f"{cls.all()}.")
72
+
73
+
74
def _safe_mean(losses, num_present):
  """Computes the mean of `losses`, guarding against division by zero.

  Args:
    losses: `Tensor` whose elements contain individual loss measurements.
    num_present: The number of measurable elements in `losses`.

  Returns:
    A scalar representing the mean of `losses`. If `num_present` is zero,
    then zero is returned (div_no_nan semantics).
  """
  return math_ops.div_no_nan(
      math_ops.reduce_sum(losses), num_present, name="value")
87
+
88
+
89
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero weights.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is `True`, the value is returned as a tensor of size
    `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  # Fast path: a non-zero scalar weight makes every element of `losses`
  # "present", so no masking/broadcasting graph is needed.
  # NOTE(review): the eager branch assumes `weights` is a Tensor exposing
  # `_rank()`; a plain Python int weight would not take the float fast path
  # and could fail here — confirm callers always pass float or Tensor.
  if ((isinstance(weights, float) and weights != 0.0) or
      (context.executing_eagerly() and weights._rank() == 0  # pylint: disable=protected-access
       and not math_ops.equal(weights, 0.0))):
    return _num_elements(losses)
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.cast(weights, dtype=dtypes.float32)
    # Build a 0/1 mask of non-zero weights, then tile it to losses' shape.
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      # Reduce over every axis except the batch (axis 0), keeping dims so the
      # result broadcasts back against per-batch losses.
      return math_ops.reduce_sum(
          present,
          axis=math_ops.range(1, array_ops.rank(present)),
          keepdims=True,
          name=scope)
    return math_ops.reduce_sum(present, name=scope)
129
+
130
+
131
def _num_elements(losses):
  """Returns the total element count of `losses`, cast to its dtype."""
  with ops.name_scope(None, "num_elements", values=[losses]) as scope:
    count = array_ops.size(losses, name=scope)
    return math_ops.cast(count, dtype=losses.dtype)
135
+
136
+
137
@tf_export(v1=["losses.compute_weighted_loss"])
@dispatch.add_dispatch_support
def compute_weighted_loss(
    losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `losses`, and must be broadcastable to `losses` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: the loss will be added to these collections.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.

  Note:
    When calculating the gradient of a weighted loss contributions from
    both `losses` and `weights` are considered. If your `weights` depend
    on some model parameters but you do not want this to affect the loss
    gradient, you need to apply `tf.stop_gradient` to `weights` before
    passing them to `compute_weighted_loss`.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  Reduction.validate(reduction)
  with ops.name_scope(scope, "weighted_loss", (losses, weights)):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    def compute_loss(losses, weights, loss_collection, reduction):
      # Inner helper so it can run either with or without the
      # assert_broadcastable control dependency added below.
      losses = ops.convert_to_tensor(losses)
      input_dtype = losses.dtype
      # All arithmetic is done in float32; the result is cast back to the
      # caller's dtype at the end.
      losses = math_ops.cast(losses, dtype=dtypes.float32)
      weights = math_ops.cast(weights, dtype=dtypes.float32)
      weighted_losses = math_ops.multiply(losses, weights)
      if reduction == Reduction.NONE:
        loss = weighted_losses
      else:
        loss = math_ops.reduce_sum(weighted_losses)
        if reduction == Reduction.MEAN:
          # Normalize by the broadcast sum of weights.
          loss = _safe_mean(
              loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights))
        elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or
              reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS):
          loss = _safe_mean(loss, _num_present(losses, weights))
        elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
          loss = _safe_mean(loss, _num_elements(losses))

      # Convert the result back to the input type.
      loss = math_ops.cast(loss, input_dtype)
      util.add_loss(loss, loss_collection)
      return loss

    # Skip the assert_broadcastable in XLA context because asserts are not
    # supported so it only causes unnecessary ops. Also skip it because it uses
    # a DenseToDenseSetOperation op that is incompatible with XLA when
    # the shape(s) are dynamic.
    if control_flow_ops.get_enclosing_xla_context() is not None:
      return compute_loss(losses, weights, loss_collection, reduction)
    else:
      with ops.control_dependencies(
          (weights_broadcast_ops.assert_broadcastable(weights, losses),)):
        return compute_loss(losses, weights, loss_collection, reduction)
214
+
215
+
216
@tf_export(v1=["losses.absolute_difference"])
@dispatch.add_dispatch_support
def absolute_difference(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds an Absolute Difference (L1) loss to the training procedure.

  `weights` acts as a coefficient for the loss: a scalar simply scales the
  loss, a `[batch_size]` vector rescales each sample's loss, and a tensor
  matching the shape of `predictions` rescales each measurable element.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid or if `labels` or `predictions`
      is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "absolute_difference",
                      (predictions, labels, weights)) as scope:
    preds = math_ops.cast(predictions, dtype=dtypes.float32)
    targets = math_ops.cast(labels, dtype=dtypes.float32)
    preds.get_shape().assert_is_compatible_with(targets.get_shape())
    # Element-wise |predictions - labels|.
    losses = math_ops.abs(math_ops.subtract(preds, targets))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
268
+
269
+
270
@tf_export(v1=["losses.cosine_distance"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
    labels, predictions, axis=None, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
    dim=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    labels: `Tensor` whose shape matches 'predictions'
    predictions: An arbitrary matrix.
    axis: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: Type of reduction to apply to loss.
    dim: The old (deprecated) name for `axis`.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `axis`, `labels`, `predictions` or `weights` is `None`.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("You must specify argument `axis`.")
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      (predictions, labels, weights)) as scope:
    preds = math_ops.cast(predictions, dtype=dtypes.float32)
    targets = math_ops.cast(labels, dtype=dtypes.float32)
    preds.get_shape().assert_is_compatible_with(targets.get_shape())

    # For unit-normalized inputs, cosine similarity is the sum of elementwise
    # products along `axis`; the distance is its complement.
    similarity = math_ops.reduce_sum(
        math_ops.multiply(preds, targets), axis=(axis,), keepdims=True)
    losses = 1 - similarity
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
325
+
326
+
327
@tf_export(v1=["losses.hinge_loss"])
@dispatch.add_dispatch_support
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
      Internally the {0,1} labels are converted to {-1,1} when calculating the
      hinge loss.
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a
      positive (resp. negative) binary prediction.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match or
      if `labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if logits is None:
    raise ValueError("Argument `logits` must not be None.")
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.cast(logits, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # Map the {0, 1} labels onto {-1, 1} signs before applying the hinge.
    ones = array_ops.ones_like(labels)
    signed_labels = math_ops.subtract(2 * labels, ones)
    # hinge(x) = max(0, 1 - y * x), written with relu.
    losses = nn_ops.relu(
        math_ops.subtract(ones, math_ops.multiply(signed_labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
376
+
377
+
378
@tf_export(v1=["losses.huber_loss"])
@dispatch.add_dispatch_support
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure.

  For each value x in `error=labels-predictions`, the loss is:

  ```
  0.5 * x^2                   if |x| <= d
  0.5 * d^2 + d * (|x| - d)   if |x| > d
  ```

  where d is `delta`.

  `weights` acts as a coefficient for the loss: a scalar simply scales the
  loss; a tensor of shape `[batch_size]` rescales the total loss of each
  sample of the batch; a tensor with the same shape as `predictions` rescales
  the loss of each measurable element of `predictions` individually.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    delta: `float`, the point where the huber loss function changes from a
      quadratic to linear.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid. Also if `labels` or
      `predictions` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "huber_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    abs_error = math_ops.abs(math_ops.subtract(predictions, labels))
    # Quadratic part of the loss: |error| clipped at delta.
    quadratic = math_ops.minimum(abs_error, delta)
    # Equal in value to tf.maximum(abs_error - delta, 0), but crucially its
    # gradient at abs_error == delta is 0 (tf.maximum would give 1). This
    # avoids doubling the gradient, since the quadratic term already makes a
    # nonzero contribution at that point.
    linear = math_ops.subtract(abs_error, quadratic)
    half = ops.convert_to_tensor(0.5, dtype=quadratic.dtype)
    losses = math_ops.add(
        math_ops.multiply(half, math_ops.multiply(quadratic, quadratic)),
        math_ops.multiply(delta, linear))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
453
+
454
+
455
@tf_export(v1=["losses.log_loss"])
@dispatch.add_dispatch_support
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
             loss_collection=ops.GraphKeys.LOSSES,
             reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss: a scalar simply scales the
  loss; a tensor of shape `[batch_size]` rescales the total loss of each
  sample of the batch; a tensor with the same shape as `predictions` rescales
  the loss of each measurable element of `predictions` individually.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid. Also if `labels` or
      `predictions` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "log_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Binary cross-entropy: -y*log(p) - (1-y)*log(1-p), with epsilon guarding
    # against log(0).
    positive_term = math_ops.multiply(
        labels, math_ops.log(predictions + epsilon))
    negative_term = math_ops.multiply(
        (1 - labels), math_ops.log(1 - predictions + epsilon))
    losses = -positive_term - negative_term
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
510
+
511
+
512
+ # TODO(b/37208492): Add reduction arg.
513
@tf_export(v1=["losses.mean_pairwise_squared_error"])
@dispatch.add_dispatch_support
def mean_pairwise_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds a pairwise-errors-squared loss to the training procedure.

  Unlike `mean_squared_error`, which is a measure of the differences between
  corresponding elements of `predictions` and `labels`,
  `mean_pairwise_squared_error` is a measure of the differences between pairs
  of corresponding elements of `predictions` and `labels`.

  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences that are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3

  Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimension [batch_size, 100, 200], then the set of
  pairs is drawn from each image, but not across images.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size `[batch_size]`, then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector.

  Args:
    labels: The ground truth output tensor, whose shape must match the shape
      of `predictions`.
    predictions: The predicted outputs, a tensor of size
      `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
      `predictions`.
    weights: Coefficients for the loss a scalar, a tensor of shape
      `[batch_size]` or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid. Also if `labels` or
      `predictions` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "mean_pairwise_squared_error",
                      (predictions, labels, weights)) as scope:
    weights = math_ops.cast(weights, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)

    def compute_loss(labels, predictions, weights, loss_collection):
      # Inner helper so the same computation can run with or without the
      # assert_broadcastable control dependency (see the XLA branch below).
      predictions = math_ops.cast(predictions, dtype=dtypes.float32)
      predictions.get_shape().assert_is_compatible_with(labels.get_shape())

      diffs = math_ops.subtract(predictions, labels)

      # Reduce over every dimension except the batch dimension (axis 0), so
      # pairs are formed within each sample only.
      axis = math_ops.range(1, array_ops.rank(diffs))

      sum_squares_diff_per_batch = math_ops.reduce_sum(
          math_ops.square(diffs), axis=axis, keepdims=True)
      num_present_per_batch = _num_present(diffs, weights, per_batch=True)

      # Pairwise expansion: sum_{i,j} ((d_i - d_j)^2) over n elements equals
      # 2*n*sum(d^2) - 2*(sum d)^2; dividing by n*(n-1) pairs yields term1 -
      # term2 below. div_no_nan (with the max(..., 0) clamp) returns 0 when a
      # sample has fewer than 2 present elements instead of dividing by zero.
      term1 = 2.0 * math_ops.div_no_nan(
          sum_squares_diff_per_batch,
          math_ops.maximum(num_present_per_batch - 1, 0),
          name="value")

      sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)
      term2 = 2.0 * math_ops.div_no_nan(
          math_ops.square(sum_diff),
          math_ops.maximum(
              math_ops.multiply(num_present_per_batch,
                                num_present_per_batch - 1), 0),
          name="value")

      weighted_losses = math_ops.multiply(term1 - term2, weights)
      loss = math_ops.reduce_sum(weighted_losses)

      # If no elements are present at all, report a zero loss of the same
      # dtype/shape rather than the (meaningless) accumulated value.
      mean_loss = array_ops.where(
          math_ops.reduce_sum(num_present_per_batch) > 0,
          loss,
          array_ops.zeros_like(loss),
          name="value")
      util.add_loss(mean_loss, loss_collection)
      return mean_loss

    # Skip the assert_broadcastable in XLA context because asserts are not
    # supported so it only causes unnecessary ops. Also skip it because it uses
    # a DenseToDenseSetOperation op that is incompatible with XLA when
    # the shape(s) are dynamic.
    if control_flow_ops.get_enclosing_xla_context() is not None:
      return compute_loss(labels, predictions, weights, loss_collection)
    else:
      with ops.control_dependencies(
          (weights_broadcast_ops.assert_broadcastable(weights, labels),)):
        return compute_loss(labels, predictions, weights, loss_collection)
619
+
620
+
621
@tf_export(v1=["losses.mean_squared_error"])
@dispatch.add_dispatch_support
def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid. Also if `labels` or `predictions`
      is None.

  @compatibility(TF2)

  `tf.compat.v1.losses.mean_squared_error` is mostly compatible with eager
  execution and `tf.function`. But, the `loss_collection` argument is
  ignored when executing eagerly and no loss will be written to the loss
  collections. You will need to either hold on to the return value manually
  or rely on `tf.keras.Model` loss tracking.


  To switch to native TF2 style, instantiate the
  `tf.keras.losses.MeanSquaredError` class and call the object instead.


  #### Structural Mapping to Native TF2

  Before:

  ```python
  loss = tf.compat.v1.losses.mean_squared_error(
    labels=labels,
    predictions=predictions,
    weights=weights,
    reduction=reduction)
  ```

  After:

  ```python
  loss_fn = tf.keras.losses.MeanSquaredError(
    reduction=reduction)
  loss = loss_fn(
    y_true=labels,
    y_pred=predictions,
    sample_weight=weights)
  ```

  #### How to Map Arguments

  | TF1 Arg Name          | TF2 Arg Name     | Note                       |
  | :-------------------- | :--------------- | :------------------------- |
  | `labels`              | `y_true`         | In `__call__()` method     |
  | `predictions`         | `y_pred`         | In `__call__()` method     |
  | `weights`             | `sample_weight`  | In `__call__()` method.    |
  : : : The shape requirements for `sample_weight` is different from :
  : : : `weights`. Please check the [argument definition][api_docs] for :
  : : : details. :
  | `scope`               | Not supported    | -                          |
  | `loss_collection`     | Not supported    | Losses should be tracked   |
  : : : explicitly or with Keras APIs, for example, [add_loss][add_loss], :
  : : : instead of via collections :
  | `reduction`           | `reduction`      | In constructor. Value of   |
  : : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
  : : : `tf.compat.v1.losses.Reduction.SUM`, :
  : : : `tf.compat.v1.losses.Reduction.NONE` in :
  : : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to :
  : : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
  : : : `tf.keras.losses.Reduction.SUM`, :
  : : : `tf.keras.losses.Reduction.NONE`, respectively. If you :
  : : : used other value for `reduction`, including the default value :
  : : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is :
  : : : no directly corresponding value. Please modify the loss :
  : : : implementation manually. :

  [add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss
  [api_docs]:https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanSquaredError#__call__


  #### Before & After Usage Example

  Before:

  >>> y_true = [1, 2, 3]
  >>> y_pred = [1, 3, 5]
  >>> weights = [0, 1, 0.25]
  >>> # samples with zero-weight are excluded from calculation when `reduction`
  >>> # argument is set to default value `Reduction.SUM_BY_NONZERO_WEIGHTS`
  >>> tf.compat.v1.losses.mean_squared_error(
  ...    labels=y_true,
  ...    predictions=y_pred,
  ...    weights=weights).numpy()
  1.0

  >>> tf.compat.v1.losses.mean_squared_error(
  ...    labels=y_true,
  ...    predictions=y_pred,
  ...    weights=weights,
  ...    reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE).numpy()
  0.66667

  After:

  >>> y_true = [[1.0], [2.0], [3.0]]
  >>> y_pred = [[1.0], [3.0], [5.0]]
  >>> weights = [1, 1, 0.25]
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)
  >>> mse(y_true=y_true, y_pred=y_pred, sample_weight=weights).numpy()
  0.66667

  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    # Both inputs are computed in float32 regardless of the incoming dtype.
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    # Static shape check: raises early rather than failing inside the op.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Element-wise (predictions - labels)^2; weighting/reduction is delegated
    # to compute_weighted_loss.
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
769
+
770
+
771
@tf_export(v1=["losses.sigmoid_cross_entropy"])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy(
    multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss: a scalar simply scales the
  loss, while a tensor of shape `[batch_size]` applies a weight to each
  corresponding sample.

  If `label_smoothing` is nonzero, the labels are smoothed towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
    logits: Float `[batch_size, num_classes]` logits outputs of the network.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `multi_class_labels`, and must be broadcastable to `multi_class_labels`
      (i.e., all dimensions must be either `1`, or the same as the
      corresponding `losses` dimension).
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has the same shape as `logits`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None. Also if `multi_class_labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if multi_class_labels is None:
    raise ValueError("Argument `multi_class_labels` must not be None.")
  if logits is None:
    raise ValueError("Argument `logits` must not be None.")
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      (logits, multi_class_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    # Labels take the dtype of the logits so the cross-entropy op sees a
    # single dtype.
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    if label_smoothing > 0:
      # Pull every label towards 1/2 by the smoothing amount.
      on_value_scale = 1 - label_smoothing
      multi_class_labels = (
          multi_class_labels * on_value_scale + 0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(
        labels=multi_class_labels, logits=logits, name="xentropy")
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
835
+
836
+
837
@tf_export(v1=["losses.softmax_cross_entropy"])
@dispatch.add_dispatch_support
def softmax_cross_entropy(
    onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  r"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
      new_onehot_labels = onehot_labels * (1 - label_smoothing)
                          + label_smoothing / num_classes

  Note that `onehot_labels` and `logits` must have the same shape,
  e.g. `[batch_size, num_classes]`. The shape of `weights` must be
  broadcastable to loss, whose shape is decided by the shape of `logits`.
  In case the shape of `logits` is `[batch_size, num_classes]`, loss is
  a `Tensor` of shape `[batch_size]`.

  Args:
    onehot_labels: One-hot-encoded labels.
    logits: Logits outputs of the network.
    weights: Optional `Tensor` that is broadcastable to loss.
    label_smoothing: If greater than 0 then smooth the labels.
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has shape `[batch_size]`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
      or if the shape of `weights` is invalid or if `weights` is None. Also if
      `onehot_labels` or `logits` is None.

  @compatibility(TF2)

  `tf.compat.v1.losses.softmax_cross_entropy` is mostly compatible with eager
  execution and `tf.function`. But, the `loss_collection` argument is
  ignored when executing eagerly and no loss will be written to the loss
  collections. You will need to either hold on to the return value manually
  or rely on `tf.keras.Model` loss tracking.


  To switch to native TF2 style, instantiate the
  `tf.keras.losses.CategoricalCrossentropy` class with `from_logits` set
  as `True` and call the object instead.


  #### Structural Mapping to Native TF2

  Before:

  ```python
  loss = tf.compat.v1.losses.softmax_cross_entropy(
    onehot_labels=onehot_labels,
    logits=logits,
    weights=weights,
    label_smoothing=smoothing)
  ```

  After:

  ```python
  loss_fn = tf.keras.losses.CategoricalCrossentropy(
    from_logits=True,
    label_smoothing=smoothing)
  loss = loss_fn(
    y_true=onehot_labels,
    y_pred=logits,
    sample_weight=weights)
  ```

  #### How to Map Arguments

  | TF1 Arg Name          | TF2 Arg Name     | Note                       |
  | :-------------------- | :--------------- | :------------------------- |
  | -                     | `from_logits`    | Set `from_logits` as True  |
  :                       :                  : to have identical behavior :
  | `onehot_labels`       | `y_true`         | In `__call__()` method     |
  | `logits`              | `y_pred`         | In `__call__()` method     |
  | `weights`             | `sample_weight`  | In `__call__()` method     |
  | `label_smoothing`     | `label_smoothing`| In constructor             |
  | `scope`               | Not supported    | -                          |
  | `loss_collection`     | Not supported    | Losses should be tracked   |
  :                       :                  : explicitly or with Keras   :
  :                       :                  : APIs, for example,         :
  :                       :                  : [add_loss][add_loss],      :
  :                       :                  : instead of via collections :
  | `reduction`           | `reduction`      | In constructor. Value of   |
  : : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
  : : : `tf.compat.v1.losses.Reduction.SUM`, :
  : : : `tf.compat.v1.losses.Reduction.NONE` in :
  : : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to :
  : : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
  : : : `tf.keras.losses.Reduction.SUM`, :
  : : : `tf.keras.losses.Reduction.NONE`, respectively. If you :
  : : : used other value for `reduction`, including the default value :
  : : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is :
  : : : no directly corresponding value. Please modify the loss :
  : : : implementation manually. :

  [add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss


  #### Before & After Usage Example

  Before:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> weights = [0.3, 0.7]
  >>> smoothing = 0.2
  >>> tf.compat.v1.losses.softmax_cross_entropy(y_true, y_pred, weights=weights,
  ...   label_smoothing=smoothing).numpy()
  0.57618

  After:

  >>> cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True,
  ...   label_smoothing=smoothing)
  >>> cce(y_true, y_pred, sample_weight=weights).numpy()
  0.57618

  @end_compatibility
  """
  if onehot_labels is None:
    raise ValueError("Argument `onehot_labels` must not be None.")
  if logits is None:
    raise ValueError("Argument `logits` must not be None.")
  with ops.name_scope(scope, "softmax_cross_entropy_loss",
                      (logits, onehot_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    # Labels take the dtype of the logits so the cross-entropy op sees a
    # single dtype.
    onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())

    if label_smoothing > 0:
      # num_classes is read from the last dimension at graph time so the same
      # code works for statically-unknown class counts.
      num_classes = math_ops.cast(
          array_ops.shape(onehot_labels)[-1], logits.dtype)
      smooth_positives = 1.0 - label_smoothing
      smooth_negatives = label_smoothing / num_classes
      onehot_labels = onehot_labels * smooth_positives + smooth_negatives

    # The _v2 op propagates gradients into the labels by default;
    # stop_gradient keeps the (possibly smoothed) labels constant.
    onehot_labels = array_ops.stop_gradient(
        onehot_labels, name="labels_stop_gradient")
    losses = nn.softmax_cross_entropy_with_logits_v2(
        labels=onehot_labels, logits=logits, name="xentropy")

    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
993
+
994
+
995
+ # TODO(ptucker): Merge this with similar method in metrics_impl.
996
def _remove_squeezable_dimensions(
    labels, predictions, weights=None, expected_rank_diff=0):
  """Internal version of _remove_squeezable_dimensions which handles weights.

  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
      and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.

  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  # Align labels/predictions first; weights are then squeezed relative to the
  # (possibly already squeezed) labels.
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions, expected_rank_diff=expected_rank_diff)

  if weights is not None:
    weights = ops.convert_to_tensor(weights)
    labels_rank = labels.get_shape().ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims

    if (labels_rank is not None) and (weights_rank is not None):
      # Use static rank: the decision is made at graph-construction time, so
      # no extra ops are added.
      rank_diff = weights_rank - labels_rank
      if rank_diff == 1:
        weights = array_ops.squeeze(weights, [-1])
      return labels, predictions, weights

    # Use dynamic rank: the squeeze decision must be deferred to run time via
    # a cond op. Only attempt it when the static shape doesn't rule out a
    # squeezable last dimension (unknown rank, or last dim compatible with 1).
    rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
    if (weights_rank is None) or (
        weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):
      weights = cond.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)

  return labels, predictions, weights
1044
+
1045
+
1046
@tf_export(v1=["losses.sparse_softmax_cross_entropy"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy(
    labels, logits, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

  `weights` acts as a coefficient for the loss: a scalar simply scales the
  loss, while a tensor of shape `[batch_size]` applies a weight to each
  corresponding sample.

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`
      or `float64`.
    weights: Coefficients for the loss. This must be scalar or broadcastable
      to `labels` (i.e. same rank and each dimension is either 1 or the same).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has the same shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits`, `labels`, and `weights` are
      incompatible, or if any of them are None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if logits is None:
    raise ValueError("Argument `logits` must not be None.")
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # `labels` holds class IDs while `logits` carries one value per class, so
    # rank(logits) - rank(labels) == 1 is the expected difference; squeeze
    # away any stray trailing singleton dimensions accordingly.
    labels, logits, weights = _remove_squeezable_dimensions(
        labels, logits, weights, expected_rank_diff=1)
    losses = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/losses/util.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Utilities for manipulating the loss collections."""
16
+
17
+ from tensorflow.python.eager import context
18
+ from tensorflow.python.framework import constant_op
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.framework import ops
21
+ from tensorflow.python.ops import array_ops
22
+ from tensorflow.python.ops import check_ops
23
+ from tensorflow.python.ops import cond
24
+ from tensorflow.python.ops import confusion_matrix
25
+ from tensorflow.python.ops import math_ops
26
+ from tensorflow.python.util import tf_contextlib
27
+ from tensorflow.python.util.tf_export import tf_export
28
+
29
+
30
+ def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
31
+ """Squeeze or expand last dimension if needed.
32
+
33
+ 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
34
+ (using `confusion_matrix.remove_squeezable_dimensions`).
35
+ 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
36
+ from the new rank of `y_pred`.
37
+ If `sample_weight` is scalar, it is kept scalar.
38
+
39
+ This will use static shape if available. Otherwise, it will add graph
40
+ operations, which could result in a performance hit.
41
+
42
+ Args:
43
+ y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
44
+ y_true: Optional label `Tensor` whose dimensions match `y_pred`.
45
+ sample_weight: Optional weight scalar or `Tensor` whose dimensions match
46
+ `y_pred`.
47
+
48
+ Returns:
49
+ Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
50
+ the last dimension squeezed,
51
+ `sample_weight` could be extended by one dimension.
52
+ If `sample_weight` is None, (y_pred, y_true) is returned.
53
+ """
54
+ y_pred_shape = y_pred.shape
55
+ y_pred_rank = y_pred_shape.ndims
56
+ if y_true is not None:
57
+
58
+ # If sparse matrix is provided as `y_true`, the last dimension in `y_pred`
59
+ # may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)),
60
+ # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))
61
+ # In this case, we should not try to remove squeezable dimension.
62
+ y_true_shape = y_true.shape
63
+ y_true_rank = y_true_shape.ndims
64
+ if (y_true_rank is not None) and (y_pred_rank is not None):
65
+ # Use static rank for `y_true` and `y_pred`.
66
+ if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
67
+ y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
68
+ y_true, y_pred)
69
+ else:
70
+ # Use dynamic rank.
71
+ rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
72
+ squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions( # pylint: disable=g-long-lambda
73
+ y_true, y_pred)
74
+ is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
75
+ maybe_squeeze_dims = lambda: cond.cond( # pylint: disable=g-long-lambda
76
+ is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
77
+ y_true, y_pred = cond.cond(
78
+ math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)
79
+
80
+ if sample_weight is None:
81
+ return y_pred, y_true
82
+
83
+ weights_shape = sample_weight.shape
84
+ weights_rank = weights_shape.ndims
85
+ if weights_rank == 0: # If weights is scalar, do nothing.
86
+ return y_pred, y_true, sample_weight
87
+
88
+ if (y_pred_rank is not None) and (weights_rank is not None):
89
+ # Use static rank.
90
+ if weights_rank - y_pred_rank == 1:
91
+ sample_weight = array_ops.squeeze(sample_weight, [-1])
92
+ elif y_pred_rank - weights_rank == 1:
93
+ sample_weight = array_ops.expand_dims(sample_weight, [-1])
94
+ return y_pred, y_true, sample_weight
95
+
96
+ # Use dynamic rank.
97
+ weights_rank_tensor = array_ops.rank(sample_weight)
98
+ rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
99
+ maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])
100
+
101
+ def _maybe_expand_weights():
102
+ expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1])
103
+ return cond.cond(
104
+ math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight)
105
+
106
+ def _maybe_adjust_weights():
107
+ return cond.cond(
108
+ math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
109
+ _maybe_expand_weights)
110
+
111
+ # squeeze or expand last dim of `sample_weight` if its rank differs by 1
112
+ # from the new rank of `y_pred`.
113
+ sample_weight = cond.cond(
114
+ math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
115
+ _maybe_adjust_weights)
116
+ return y_pred, y_true, sample_weight
117
+
118
+
119
+ def scale_losses_by_sample_weight(losses, sample_weight):
120
+ """Scales loss values by the given sample weights.
121
+
122
+ `sample_weight` dimensions are updated to match with the dimension of `losses`
123
+ if possible by using squeeze/expand/broadcast.
124
+
125
+ Args:
126
+ losses: Loss tensor.
127
+ sample_weight: Sample weights tensor.
128
+
129
+ Returns:
130
+ `losses` scaled by `sample_weight` with dtype float32.
131
+ """
132
+ # TODO(psv): Handle the casting here in a better way, eg. if losses is float64
133
+ # we do not want to lose precision.
134
+ losses = math_ops.cast(losses, dtypes.float32)
135
+ sample_weight = math_ops.cast(sample_weight, dtypes.float32)
136
+
137
+ # Update dimensions of `sample_weight` to match with `losses` if possible.
138
+ losses, _, sample_weight = squeeze_or_expand_dimensions(
139
+ losses, None, sample_weight)
140
+ return math_ops.multiply(losses, sample_weight)
141
+
142
+
143
+ @tf_contextlib.contextmanager
144
+ def check_per_example_loss_rank(per_example_loss):
145
+ """Context manager that checks that the rank of per_example_loss is at least 1.
146
+
147
+ Args:
148
+ per_example_loss: Per example loss tensor.
149
+
150
+ Yields:
151
+ A context manager.
152
+ """
153
+ loss_rank = per_example_loss.shape.rank
154
+ if loss_rank is not None:
155
+ # Handle static rank.
156
+ if loss_rank == 0:
157
+ raise ValueError(
158
+ "Invalid value passed for `per_example_loss`. Expected a tensor with "
159
+ f"at least rank 1. Received per_example_loss={per_example_loss} with "
160
+ f"rank {loss_rank}")
161
+ yield
162
+ else:
163
+ # Handle dynamic rank.
164
+ with ops.control_dependencies([
165
+ check_ops.assert_greater_equal(
166
+ array_ops.rank(per_example_loss),
167
+ math_ops.cast(1, dtype=dtypes.int32),
168
+ message="Invalid value passed for `per_example_loss`. Expected a "
169
+ "tensor with at least rank 1.")
170
+ ]):
171
+ yield
172
+
173
+
174
+ @tf_export(v1=["losses.add_loss"])
175
+ def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
176
+ """Adds a externally defined loss to the collection of losses.
177
+
178
+ Args:
179
+ loss: A loss `Tensor`.
180
+ loss_collection: Optional collection to add the loss to.
181
+ """
182
+ # Since we have no way of figuring out when a training iteration starts or
183
+ # ends, holding on to a loss when executing eagerly is indistinguishable from
184
+ # leaking memory. We instead leave the collection empty.
185
+ if loss_collection and not context.executing_eagerly():
186
+ ops.add_to_collection(loss_collection, loss)
187
+
188
+
189
+ @tf_export(v1=["losses.get_losses"])
190
+ def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
191
+ """Gets the list of losses from the loss_collection.
192
+
193
+ Args:
194
+ scope: An optional scope name for filtering the losses to return.
195
+ loss_collection: Optional losses collection.
196
+
197
+ Returns:
198
+ a list of loss tensors.
199
+ """
200
+ return ops.get_collection(loss_collection, scope)
201
+
202
+
203
+ @tf_export(v1=["losses.get_regularization_losses"])
204
+ def get_regularization_losses(scope=None):
205
+ """Gets the list of regularization losses.
206
+
207
+ Args:
208
+ scope: An optional scope name for filtering the losses to return.
209
+
210
+ Returns:
211
+ A list of regularization losses as Tensors.
212
+ """
213
+ return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
214
+
215
+
216
+ @tf_export(v1=["losses.get_regularization_loss"])
217
+ def get_regularization_loss(scope=None, name="total_regularization_loss"):
218
+ """Gets the total regularization loss.
219
+
220
+ Args:
221
+ scope: An optional scope name for filtering the losses to return.
222
+ name: The name of the returned tensor.
223
+
224
+ Returns:
225
+ A scalar regularization loss.
226
+ """
227
+ losses = get_regularization_losses(scope)
228
+ if losses:
229
+ return math_ops.add_n(losses, name=name)
230
+ else:
231
+ return constant_op.constant(0.0)
232
+
233
+
234
+ @tf_export(v1=["losses.get_total_loss"])
235
+ def get_total_loss(add_regularization_losses=True,
236
+ name="total_loss",
237
+ scope=None):
238
+ """Returns a tensor whose value represents the total loss.
239
+
240
+ In particular, this adds any losses you have added with `tf.add_loss()` to
241
+ any regularization losses that have been added by regularization parameters
242
+ on layers constructors e.g. `tf.layers`. Be very sure to use this if you
243
+ are constructing a loss_op manually. Otherwise regularization arguments
244
+ on `tf.layers` methods will not function.
245
+
246
+ Args:
247
+ add_regularization_losses: A boolean indicating whether or not to use the
248
+ regularization losses in the sum.
249
+ name: The name of the returned tensor.
250
+ scope: An optional scope name for filtering the losses to return. Note that
251
+ this filters the losses added with `tf.add_loss()` as well as the
252
+ regularization losses to that scope.
253
+
254
+ Returns:
255
+ A `Tensor` whose value represents the total loss.
256
+
257
+ Raises:
258
+ ValueError: if `losses` is not iterable.
259
+ """
260
+ losses = get_losses(scope=scope)
261
+ if add_regularization_losses:
262
+ losses += get_regularization_losses(scope=scope)
263
+ return math_ops.add_n(losses, name=name)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__init__.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """# tf.experimental.numpy: NumPy API on TensorFlow.
16
+
17
+ This module provides a subset of NumPy API, built on top of TensorFlow
18
+ operations. APIs are based on and have been tested with NumPy 1.16 version.
19
+
20
+ The set of supported APIs may be expanded over time. Also future releases may
21
+ change the baseline version of NumPy API being supported. A list of some
22
+ systematic differences with NumPy is listed later in the "Differences with
23
+ NumPy" section.
24
+
25
+ ## Getting Started
26
+
27
+ Please also see [TensorFlow NumPy Guide](
28
+ https://www.tensorflow.org/guide/tf_numpy).
29
+
30
+ In the code snippets below, we will assume that `tf.experimental.numpy` is
31
+ imported as `tnp` and NumPy is imported as `np`
32
+
33
+ ```python
34
+ print(tnp.ones([2,1]) + np.ones([1, 2]))
35
+ ```
36
+
37
+ ## Types
38
+
39
+ The module provides an `ndarray` class which wraps an immutable `tf.Tensor`.
40
+ Additional functions are provided which accept array-like objects. Here
41
+ array-like objects include `ndarrays` as defined by this module, as well as
42
+ `tf.Tensor`, in addition to types accepted by NumPy.
43
+
44
+ A subset of NumPy dtypes are supported. Type promotion* follows NumPy
45
+ semantics.
46
+
47
+ **Note**: A new type promotion that offers a lot of advantages over the old
48
+ type promotion is now available. Learn more about enabling the new
49
+ type promotion
50
+ [here](https://www.tensorflow.org/guide/tf_numpy_type_promotion).
51
+
52
+ ```python
53
+ print(tnp.ones([1, 2], dtype=tnp.int16) + tnp.ones([2, 1], dtype=tnp.uint8))
54
+ ```
55
+
56
+ ## Array Interface
57
+
58
+ The `ndarray` class implements the `__array__` interface. This should allow
59
+ these objects to be passed into contexts that expect a NumPy or array-like
60
+ object (e.g. matplotlib).
61
+
62
+ ```python
63
+ np.sum(tnp.ones([1, 2]) + np.ones([2, 1]))
64
+ ```
65
+
66
+
67
+ ## TF Interoperability
68
+
69
+ The TF-NumPy API calls can be interleaved with TensorFlow calls
70
+ without incurring Tensor data copies. This is true even if the `ndarray` or
71
+ `tf.Tensor` is placed on a non-CPU device.
72
+
73
+ In general, the expected behavior should be on par with that of code involving
74
+ `tf.Tensor` and running stateless TensorFlow functions on them.
75
+
76
+ ```python
77
+ tnp.sum(tnp.ones([1, 2]) + tf.ones([2, 1]))
78
+ ```
79
+
80
+ Note that the `__array_priority__` is currently chosen to be lower than
81
+ `tf.Tensor`. Hence the `+` operator above returns a `tf.Tensor`.
82
+
83
+ Additional examples of interoperability include:
84
+
85
+ * using `with tf.GradientTape()` scope to compute gradients through the
86
+ TF-NumPy API calls.
87
+ * using `tf.distribution.Strategy` scope for distributed execution
88
+ * using `tf.vectorized_map()` for speeding up code using auto-vectorization
89
+
90
+
91
+
92
+ ## Device Support
93
+
94
+ Given that `ndarray` and functions wrap TensorFlow constructs, the code will
95
+ have GPU and TPU support on par with TensorFlow. Device placement can be
96
+ controlled by using `with tf.device` scopes. Note that these devices could
97
+ be local or remote.
98
+
99
+ ```python
100
+ with tf.device("GPU:0"):
101
+ x = tnp.ones([1, 2])
102
+ print(tf.convert_to_tensor(x).device)
103
+ ```
104
+
105
+ ## Graph and Eager Modes
106
+
107
+ Eager mode execution should typically match NumPy semantics of executing
108
+ op-by-op. However the same code can be executed in graph mode, by putting it
109
+ inside a `tf.function`. The function body can contain NumPy code, and the inputs
110
+ can be `ndarray` as well.
111
+
112
+ ```python
113
+ @tf.function
114
+ def f(x, y):
115
+ return tnp.sum(x + y)
116
+
117
+ f(tnp.ones([1, 2]), tf.ones([2, 1]))
118
+ ```
119
+ Python control flow based on `ndarray` values will be translated by
120
+ [autograph](https://www.tensorflow.org/code/tensorflow/python/autograph/g3doc/reference/index.md)
121
+ into `tf.cond` and `tf.while_loop` constructs. The code can be XLA compiled
122
+ for further optimizations.
123
+
124
+ However, note that graph mode execution can change behavior of certain
125
+ operations since symbolic execution may not have information that is computed
126
+ during runtime. Some differences are:
127
+
128
+ * Shapes can be incomplete or unknown in graph mode. This means that
129
+ `ndarray.shape`, `ndarray.size` and `ndarray.ndim` can return `ndarray`
130
+ objects instead of returning integer (or tuple of integer) values.
131
+ * `__len__`, `__iter__` and `__index__` properties of `ndarray`
132
+ may similarly not be supported in graph mode. Code using these
133
+ may need to change to explicit shape operations or control flow
134
+ constructs.
135
+ * Also note the [autograph limitations](
136
+ https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md).
137
+
138
+
139
+ ## Mutation and Variables
140
+
141
+ `ndarrays` currently wrap immutable `tf.Tensor`. Hence mutation
142
+ operations like slice assigns are not supported. This may change in the future.
143
+ Note however that one can directly construct a `tf.Variable` and use that with
144
+ the TF-NumPy APIs.
145
+
146
+ ```python
147
+ tf_var = tf.Variable(2.0)
148
+ tf_var.assign_add(tnp.square(tf_var))
149
+ ```
150
+
151
+ ## Differences with NumPy
152
+
153
+ Here is a non-exhaustive list of differences:
154
+
155
+ * Not all dtypes are currently supported. e.g. `np.float96`, `np.float128`.
156
+ `np.object_`, `np.str_`, `np.recarray` types are not supported.
157
+ * `ndarray` storage is in C order only. Fortran order, views, `stride_tricks`
158
+ are not supported.
159
+ * Only a subset of functions and modules are supported. This set will be
160
+ expanded over time. For supported functions, some arguments or argument
161
+ values may not be supported. These differences are generally provided in the
162
+ function comments. Full `ufunc` support is also not provided.
163
+ * Buffer mutation is currently not supported. `ndarrays` wrap immutable
164
+ tensors. This means that output buffer arguments (e.g. `out` in ufuncs) are
165
+ not supported.
166
+ * NumPy C API is not supported. NumPy's Cython and Swig integration are not
167
+ supported.
168
+
169
+ API docstring: tensorflow.experimental.numpy
170
+ """
171
+ # TODO(wangpeng): Append `tf_export`ed symbols to the comments above.
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.8 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_array_ops.cpython-310.pyc ADDED
Binary file (51.5 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_arrays.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_config.cpython-310.pyc ADDED
Binary file (2.19 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_dtypes.cpython-310.pyc ADDED
Binary file (4.15 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_math_ops.cpython-310.pyc ADDED
Binary file (46.3 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_random.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_utils.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_array_ops.py ADDED
@@ -0,0 +1,2111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Common array methods."""
16
+ # pylint: disable=g-direct-tensorflow-import
17
+
18
+ import builtins
19
+ import enum
20
+ import functools
21
+ import math
22
+ import numbers
23
+
24
+ import numpy as np
25
+
26
+ from tensorflow.python.framework import constant_op
27
+ from tensorflow.python.framework import dtypes
28
+ from tensorflow.python.framework import ops
29
+ from tensorflow.python.framework import tensor as tensor_lib
30
+ from tensorflow.python.framework import tensor_shape
31
+ from tensorflow.python.ops import array_ops
32
+ from tensorflow.python.ops import array_ops_stack
33
+ from tensorflow.python.ops import clip_ops
34
+ from tensorflow.python.ops import control_flow_assert
35
+ from tensorflow.python.ops import linalg_ops
36
+ from tensorflow.python.ops import manip_ops
37
+ from tensorflow.python.ops import math_ops
38
+ from tensorflow.python.ops import sort_ops
39
+ from tensorflow.python.ops.numpy_ops import np_arrays
40
+ from tensorflow.python.ops.numpy_ops import np_dtypes
41
+ from tensorflow.python.ops.numpy_ops import np_utils
42
+ from tensorflow.python.types import core as core_tf_types
43
+ from tensorflow.python.util import nest
44
+ from tensorflow.python.util import tf_export
45
+
46
+
47
+ newaxis = np.newaxis
48
+ tf_export.tf_export('experimental.numpy.newaxis', v1=[]).export_constant(
49
+ __name__, 'newaxis'
50
+ )
51
+
52
+
53
+ @tf_export.tf_export('experimental.numpy.empty', v1=[])
54
+ @np_utils.np_doc('empty')
55
+ def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
56
+ return zeros(shape, dtype)
57
+
58
+
59
+ @tf_export.tf_export('experimental.numpy.empty_like', v1=[])
60
+ @np_utils.np_doc('empty_like')
61
+ def empty_like(a, dtype=None):
62
+ return zeros_like(a, dtype)
63
+
64
+
65
+ @tf_export.tf_export('experimental.numpy.zeros', v1=[])
66
+ @np_utils.np_doc('zeros')
67
+ def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
68
+ dtype = (
69
+ np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type()
70
+ )
71
+ return array_ops.zeros(shape, dtype=dtype)
72
+
73
+
74
+ @tf_export.tf_export('experimental.numpy.zeros_like', v1=[])
75
+ @np_utils.np_doc('zeros_like')
76
+ def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
77
+ dtype = np_utils.result_type_unary(a, dtype)
78
+
79
+ dtype = dtypes.as_dtype(dtype) # Work around b/149877262
80
+ return array_ops.zeros_like(a, dtype)
81
+
82
+
83
+ @tf_export.tf_export('experimental.numpy.ones', v1=[])
84
+ @np_utils.np_doc('ones')
85
+ def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
86
+ if dtype:
87
+ dtype = np_utils.result_type(dtype)
88
+ return array_ops.ones(shape, dtype=dtype)
89
+
90
+
91
+ @tf_export.tf_export('experimental.numpy.ones_like', v1=[])
92
+ @np_utils.np_doc('ones_like')
93
+ def ones_like(a, dtype=None):
94
+ dtype = np_utils.result_type_unary(a, dtype)
95
+ return array_ops.ones_like(a, dtype)
96
+
97
+
98
+ @tf_export.tf_export('experimental.numpy.eye', v1=[])
99
+ @np_utils.np_doc('eye')
100
+ def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
101
+ if dtype:
102
+ dtype = np_utils.result_type(dtype)
103
+ if not M:
104
+ M = N
105
+ # Making sure N, M and k are `int`
106
+ N = int(N)
107
+ M = int(M)
108
+ k = int(k)
109
+ if k >= M or -k >= N:
110
+ # tf.linalg.diag will raise an error in this case
111
+ return zeros([N, M], dtype=dtype)
112
+ if k == 0:
113
+ return linalg_ops.eye(N, M, dtype=dtype)
114
+ # We need the precise length, otherwise tf.linalg.diag will raise an error
115
+ diag_len = builtins.min(N, M)
116
+ if k > 0:
117
+ if N >= M:
118
+ diag_len -= k
119
+ elif N + k > M:
120
+ diag_len = M - k
121
+ elif k <= 0:
122
+ if M >= N:
123
+ diag_len += k
124
+ elif M - k > N:
125
+ diag_len = N + k
126
+ diagonal_ = array_ops.ones([diag_len], dtype=dtype)
127
+ return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
128
+
129
+
130
+ @tf_export.tf_export('experimental.numpy.identity', v1=[])
131
+ @np_utils.np_doc('identity')
132
+ def identity(n, dtype=float):
133
+ return eye(N=n, M=n, dtype=dtype)
134
+
135
+
136
+ @tf_export.tf_export('experimental.numpy.full', v1=[])
137
+ @np_utils.np_doc('full')
138
+ def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
139
+ if not isinstance(shape, np_arrays.ndarray):
140
+ shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
141
+ shape = atleast_1d(shape)
142
+ fill_value = asarray(fill_value, dtype=dtype)
143
+ return array_ops.broadcast_to(fill_value, shape)
144
+
145
+
146
+ # Using doc only here since np full_like signature doesn't seem to have the
147
+ # shape argument (even though it exists in the documentation online).
148
+ @tf_export.tf_export('experimental.numpy.full_like', v1=[])
149
+ @np_utils.np_doc_only('full_like')
150
+ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
151
+ """order, subok and shape arguments mustn't be changed."""
152
+ if order != 'K':
153
+ raise ValueError('Non-standard orders are not supported.')
154
+ if not subok:
155
+ raise ValueError('subok being False is not supported.')
156
+ if shape:
157
+ raise ValueError('Overriding the shape is not supported.')
158
+
159
+ a = asarray(a)
160
+ dtype = dtype or np_utils.result_type(a)
161
+ fill_value = asarray(fill_value, dtype=dtype)
162
+ return array_ops.broadcast_to(fill_value, array_ops.shape(a))
163
+
164
+
165
def _array_internal(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Main implementation of np.array().

  Args:
    val: the value to convert (array-like, ndarray or Tensor).
    dtype: optional target dtype; if None it is inferred from `val`.
    copy: if True, force a fresh tensor via `identity`.
    ndmin: minimum number of dimensions; leading 1-sized axes are prepended.

  Returns:
    A Tensor/ndarray with at least `ndmin` dimensions.

  Raises:
    ValueError: if `ndmin` exceeds the maximum supported rank (32).
  """
  result_t = val

  if not isinstance(result_t, tensor_lib.Tensor):
    dtype = np_utils.result_type_unary(result_t, dtype)
    # We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
    # convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
    # while np.array allows them. We need to convert-then-cast.

    # EagerTensor conversion complains about "mixed types" when converting
    # tensors with no dtype information. This is because it infers types based
    # on one selected item in the list. So e.g. when converting [2., 2j]
    # to a tensor, it will select float32 as the inferred type and not be able
    # to convert the list to a float 32 tensor.
    # Since we have some information about the final dtype we care about, we
    # supply that information so that convert_to_tensor will do best-effort
    # conversion to that dtype first.
    result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
    result_t = math_ops.cast(result_t, dtype=dtype)
  elif dtype:
    # Already a Tensor: just cast to the requested dtype.
    result_t = math_ops.cast(result_t, dtype)

  if copy:
    # `identity` produces a new tensor, satisfying np.array's copy semantics.
    result_t = array_ops.identity(result_t)

  max_ndmin = 32
  if ndmin > max_ndmin:
    raise ValueError(
        f'ndmin bigger than allowable number of dimensions: {max_ndmin}.'
    )

  if ndmin == 0:
    return result_t

  ndims = array_ops.rank(result_t)

  def true_fn():
    # Prepend 1-sized dimensions until the rank reaches `ndmin`.
    old_shape = array_ops.shape(result_t)
    new_shape = array_ops.concat(
        [array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0
    )
    return array_ops.reshape(result_t, new_shape)

  # Only reshape when the current rank is smaller than `ndmin`.
  result_t = np_utils.cond(
      np_utils.greater(ndmin, ndims), true_fn, lambda: result_t
  )
  return result_t
213
+
214
+
215
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@tf_export.tf_export('experimental.numpy.array', v1=[])
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Since Tensors are immutable, a copy is made only if val is placed on a

  different device than the current one. Even if `copy` is False, a new Tensor
  may need to be built to satisfy `dtype` and `ndim`. This is used only if `val`
  is an ndarray or a Tensor.
  """  # pylint:disable=g-docstring-missing-newline
  # Normalize python/NumPy/TF dtype specifications to a canonical NumPy dtype.
  if dtype:
    dtype = np_utils.result_type(dtype)
  return _array_internal(val, dtype, copy, ndmin)
229
+
230
+
231
+ # pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
232
+
233
+
234
@tf_export.tf_export('experimental.numpy.asarray', v1=[])
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
  """Converts `a` to an ndarray, reusing `a` itself whenever possible."""
  if dtype:
    dtype = np_utils.result_type(dtype)
  # Fast path: an ndarray that already carries the requested dtype (or any
  # dtype, when none was requested) is returned unchanged, with no copy.
  reusable = isinstance(a, np_arrays.ndarray)
  if reusable and (not dtype or dtype == a.dtype.as_numpy_dtype):
    return a
  return array(a, dtype, copy=False)
244
+
245
+
246
@tf_export.tf_export('experimental.numpy.asanyarray', v1=[])
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
  # No ndarray subclasses exist here, so this is a plain alias of `asarray`.
  return asarray(a, dtype)
250
+
251
+
252
@tf_export.tf_export('experimental.numpy.ascontiguousarray', v1=[])
@np_utils.np_doc('ascontiguousarray')
def ascontiguousarray(a, dtype=None):
  # Tensors are always contiguous; only enforce the at-least-1-d contract.
  return array(a, dtype, ndmin=1)
256
+
257
+
258
# Numerical ranges.
@tf_export.tf_export('experimental.numpy.arange', v1=[])
@np_utils.np_doc('arange')
def arange(start, stop=None, step=1, dtype=None):
  """Returns `step`-separated values in the range [start, stop).

  Args:
    start: Start of the interval. Included in the range.
    stop: End of the interval. If not specified, `start` is treated as 0 and
      `start` value is used as `stop`. If specified, it is not included in the
      range if `step` is integer. When `step` is floating point, it may or may
      not be included.
    step: The difference between 2 consecutive values in the output range. It is
      recommended to use `linspace` instead of using non-integer values for
      `step`.
    dtype: Optional. Type of the resulting ndarray. Could be a python type, a
      NumPy type or a TensorFlow `DType`. If not provided, the largest type of
      `start`, `stop`, `step` is used.

  Raises:
    ValueError: If step is zero.
  """
  if not step:
    raise ValueError('step must be non-zero.')
  if dtype:
    dtype = np_utils.result_type(dtype)
  else:
    # Infer the result dtype from the arguments, like np.arange.
    if stop is None:
      dtype = np_utils.result_type(start, step)
    else:
      dtype = np_utils.result_type(start, step, stop)
  # An ascending range that starts at/after its end is empty (recall that when
  # `stop` is None, `start` plays the role of `stop` with an implicit start 0).
  if step > 0 and (
      (stop is not None and start > stop) or (stop is None and start < 0)
  ):
    return array([], dtype=dtype)
  # Mirror case for descending ranges.
  if step < 0 and (
      (stop is not None and start < stop) or (stop is None and start > 0)
  ):
    return array([], dtype=dtype)
  # TODO(srbs): There are some bugs when start or stop is float type and dtype
  # is integer type.
  return math_ops.cast(
      math_ops.range(start, limit=stop, delta=step), dtype=dtype
  )
302
+
303
+
304
# Building matrices.
@tf_export.tf_export('experimental.numpy.diag', v1=[])
@np_utils.np_doc('diag')
def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v)
  v_rank = array_ops.rank(v)

  # Static check when the rank is known at trace time.
  v.shape.with_rank_at_most(2)

  # TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
  # tracing time if the shape is known.
  control_flow_assert.Assert(
      np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
      [v_rank],
  )

  def _diag(v, k):
    # 1-d case: build a matrix with `v` on the k-th diagonal. An empty input
    # still produces a |k| x |k| zero matrix, matching np.diag.
    return np_utils.cond(
        math_ops.equal(array_ops.size(v), 0),
        lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: array_ops.matrix_diag(v, k=k),
    )

  def _diag_part(v, k):
    # 2-d case: extract the k-th diagonal. Out-of-bounds offsets are mapped
    # to an empty matrix first so matrix_diag_part never sees an invalid k.
    v_shape = array_ops.shape(v)
    v, k = np_utils.cond(
        np_utils.logical_or(
            np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
            np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
        ),
        lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0),
        lambda: (v, k),
    )
    result = array_ops.matrix_diag_part(v, k=k)
    return result

  # Dispatch on the (possibly dynamic) rank of the input.
  result = np_utils.cond(
      math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k)
  )
  return result
345
+
346
+
347
@tf_export.tf_export('experimental.numpy.diagonal', v1=[])
@np_utils.np_doc('diagonal')
def diagonal(a, offset=0, axis1=0, axis2=1):  # pylint: disable=missing-docstring
  a = asarray(a)

  maybe_rank = a.shape.rank
  # Fast path: main diagonal of the two innermost axes maps directly onto
  # matrix_diag_part with no axis shuffling.
  if (
      maybe_rank is not None
      and offset == 0
      and (axis1 == maybe_rank - 2 or axis1 == -2)
      and (axis2 == maybe_rank - 1 or axis2 == -1)
  ):
    return array_ops.matrix_diag_part(a)

  # General case: move the requested axes to the innermost positions first.
  a = moveaxis(a, (axis1, axis2), (-2, -1))

  a_shape = array_ops.shape(a)

  def _zeros():  # pylint: disable=missing-docstring
    return (
        array_ops.zeros(
            array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype
        ),
        0,
    )

  # All zeros since diag_part doesn't handle all possible k (aka offset).
  # Written this way since cond will run shape inference on both branches,
  # and diag_part shape inference will fail when offset is out of bounds.
  a, offset = np_utils.cond(
      np_utils.logical_or(
          np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
          np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
      ),
      _zeros,
      lambda: (a, offset),
  )

  a = array_ops.matrix_diag_part(a, k=offset)
  return a
387
+
388
+
389
@tf_export.tf_export('experimental.numpy.diagflat', v1=[])
@np_utils.np_doc('diagflat')
def diagflat(v, k=0):
  """Flattens the input and places it on the k-th diagonal of a matrix."""
  flattened = array_ops.reshape(asarray(v), [-1])
  return diag(flattened, k)
394
+
395
+
396
def _promote_dtype(*arrays):
  """Casts all inputs to their common NumPy result type, avoiding copies."""
  common = np_utils.result_type(*arrays)

  def _coerce(x):
    # ndarrays already carrying the common dtype are passed through as-is.
    if isinstance(x, np_arrays.ndarray) and common == x.dtype.as_numpy_dtype:
      return x
    return _array_internal(x, dtype=common, copy=False)

  return list(map(_coerce, arrays))
405
+
406
+
407
def _promote_dtype_binary(t1, t2):
  """Two-operand specialization of `_promote_dtype` (cheaper result_type)."""
  dtype = np_utils._result_type_binary(t1, t2)  # pylint: disable=protected-access

  def _coerce(t):
    # Reuse ndarrays that already have the promoted dtype; convert otherwise.
    if isinstance(t, np_arrays.ndarray) and dtype == t.dtype.as_numpy_dtype:
      return t
    return _array_internal(t, dtype=dtype, copy=False)

  return _coerce(t1), _coerce(t2)
418
+
419
+
420
@tf_export.tf_export('experimental.numpy.all', v1=[])
@np_utils.np_doc('all')
def all(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  """True where every element along `axis` (or overall) is truthy."""
  return math_ops.reduce_all(
      input_tensor=asarray(a, dtype=bool), axis=axis, keepdims=keepdims
  )
425
+
426
+
427
@tf_export.tf_export('experimental.numpy.any', v1=[])
@np_utils.np_doc('any')
def any(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  """True where at least one element along `axis` (or overall) is truthy."""
  return math_ops.reduce_any(
      input_tensor=asarray(a, dtype=bool), axis=axis, keepdims=keepdims
  )
432
+
433
+
434
@tf_export.tf_export('experimental.numpy.compress', v1=[])
@np_utils.np_doc('compress')
def compress(condition, a, axis=None):  # pylint: disable=redefined-outer-name,missing-function-docstring
  """Selects slices of `a` along `axis` where `condition` is True."""
  condition = asarray(condition, dtype=bool)
  a = asarray(a)

  if condition.ndim != 1:
    raise ValueError('condition must be a 1-d array.')
  # `np.compress` treats scalars as 1-d arrays.
  if a.ndim == 0:
    a = ravel(a)

  # No axis means operate on the flattened array.
  if axis is None:
    a = ravel(a)
    axis = 0

  if axis < 0:
    axis += a.ndim

  assert axis >= 0 and axis < a.ndim

  # `tf.boolean_mask` requires the first dimensions of array and condition to
  # match. `np.compress` pads condition with False when it is shorter.
  condition_t = condition
  a_t = a
  if condition.shape[0] < a.shape[axis]:
    padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False)
    condition_t = array_ops.concat([condition_t, padding], axis=0)
  return array_ops.boolean_mask(tensor=a_t, mask=condition_t, axis=axis)
463
+
464
+
465
@tf_export.tf_export('experimental.numpy.copy', v1=[])
@np_utils.np_doc('copy')
def copy(a):
  # `copy=True` forces `array` to materialize a fresh tensor.
  return array(a, copy=True)
469
+
470
+
471
def _maybe_promote_to_int(a):
  """Promotes small integer dtypes to the platform `int`, as NumPy does."""
  if dtypes.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    current = a.dtype.as_numpy_dtype
    target = np.promote_types(current, int)
    if target != current:
      a = asarray(a, dtype=target)
  return a
481
+
482
+
483
@tf_export.tf_export('experimental.numpy.cumprod', v1=[])
@np_utils.np_doc('cumprod')
def cumprod(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  """Cumulative product along `axis` (flattened input when axis is None)."""
  a = asarray(a, dtype=dtype)

  # With no explicit dtype, small int/bool inputs are widened like np.cumprod.
  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    # Normalize negative axes for math_ops.cumprod.
    axis += array_ops.rank(a)
  return math_ops.cumprod(a, axis)
498
+
499
+
500
@tf_export.tf_export('experimental.numpy.cumsum', v1=[])
@np_utils.np_doc('cumsum')
def cumsum(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  """Cumulative sum along `axis` (flattened input when axis is None)."""
  a = asarray(a, dtype=dtype)

  # With no explicit dtype, small int/bool inputs are widened like np.cumsum.
  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    # Normalize negative axes for math_ops.cumsum.
    axis += array_ops.rank(a)
  return math_ops.cumsum(a, axis)
515
+
516
+
517
@tf_export.tf_export('experimental.numpy.imag', v1=[])
@np_utils.np_doc('imag')
def imag(val):
  """Returns the imaginary part of `val`."""
  # TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we
  # always return an ndarray.
  return math_ops.imag(asarray(val))
524
+
525
+
526
# Promotion policies for `_reduce`'s `promote_int` argument (see docstring).
_TO_INT_ = 0
_TO_FLOAT = 1


def _reduce(
    tf_fn,
    a,
    axis=None,
    dtype=None,
    keepdims=None,
    promote_int=_TO_INT_,
    tf_bool_fn=None,
    preserve_bool=False,
):
  """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2)
      `_TO_FLOAT` always promotes them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
      is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
      is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools).

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if keepdims is None:
    keepdims = False
  a = asarray(a, dtype=dtype)
  # Route bool reductions to the dedicated bool op when one is provided.
  if (
      dtype == np.bool_ or preserve_bool and a.dtype == np.bool_
  ) and tf_bool_fn is not None:
    return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims)
  if dtype is None:
    dtype = a.dtype.as_numpy_dtype
    if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
      if promote_int == _TO_INT_:
        # If a is an integer/bool type and whose bit width is less than np.int_,
        # numpy up-casts it to np.int_ based on the documentation at
        # https://numpy.org/doc/1.18/reference/generated/numpy.sum.html
        if dtype == np.bool_:
          is_signed = True
          width = 8  # We can use any number here that is less than 64
        else:
          is_signed = np.issubdtype(dtype, np.signedinteger)
          width = np.iinfo(dtype).bits
        # Numpy int_ and uint are defined as 'long' and 'unsigned long', so
        # should have the same bit width.
        if ops.is_auto_dtype_conversion_enabled():
          # We default to 32 bits when using auto dtype conversion semantics.
          if width < np.iinfo(np.int32).bits:
            if is_signed:
              dtype = np.int32
            else:
              dtype = np.uint32
        else:
          if width < np.iinfo(np.int_).bits:
            if is_signed:
              dtype = np.int_
            else:
              dtype = np.uint
        a = math_ops.cast(a, dtype)
      elif promote_int == _TO_FLOAT:
        # Use a default float type.
        a = math_ops.cast(a, np_utils.result_type(float))

  # TF reduction ops only accept int32/int64 axis tensors.
  if isinstance(axis, tensor_lib.Tensor) and axis.dtype not in (
      dtypes.int32,
      dtypes.int64,
  ):
    axis = math_ops.cast(axis, dtypes.int64)

  return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims)
612
+
613
+
614
# TODO (DarrenZhang01): Add `axis` support to the `size` API.
@tf_export.tf_export('experimental.numpy.size', v1=[])
@np_utils.np_doc('size')
def size(x, axis=None):  # pylint: disable=missing-docstring
  """Number of elements in `x`; a Python int when the shape is static."""
  if axis is not None:
    raise NotImplementedError(
        'axis argument is not supported in the current `np.size` implementation'
    )
  # Plain scalars trivially have one element; avoids a tensor round trip.
  if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)):
    return 1
  x = asarray(x)
  if x.shape.is_fully_defined():
    # Static shape: compute the size eagerly as a Python int.
    return np.prod(x.shape.as_list(), dtype=int)
  else:
    # Dynamic shape: fall back to a size op evaluated at runtime.
    return array_ops.size_v2(x)
629
+
630
+
631
@tf_export.tf_export('experimental.numpy.sum', v1=[])
@np_utils.np_doc('sum')
def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
  # Bool inputs reduce with `reduce_any` only when dtype is explicitly bool
  # (np.sum otherwise converts bools to integers).
  return _reduce(
      math_ops.reduce_sum,
      a,
      axis=axis,
      dtype=dtype,
      keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_any,
  )


@tf_export.tf_export('experimental.numpy.prod', v1=[])
@np_utils.np_doc('prod')
def prod(a, axis=None, dtype=None, keepdims=None):
  # Bool inputs reduce with `reduce_all` only when dtype is explicitly bool.
  return _reduce(
      math_ops.reduce_prod,
      a,
      axis=axis,
      dtype=dtype,
      keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_all,
  )


@tf_export.tf_export('experimental.numpy.mean', v1=[])
@np_utils.np_doc('mean', unsupported_params=['out'])
def mean(a, axis=None, dtype=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # Integer/bool inputs are promoted to float, matching np.mean.
  return _reduce(
      math_ops.reduce_mean,
      a,
      axis=axis,
      dtype=dtype,
      keepdims=keepdims,
      promote_int=_TO_FLOAT,
  )


@tf_export.tf_export('experimental.numpy.amax', v1=[])
@np_utils.np_doc('amax', unsupported_params=['out'])
def amax(a, axis=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # np.max preserves bool dtype, hence `preserve_bool=True` with reduce_any.
  return _reduce(
      math_ops.reduce_max,
      a,
      axis=axis,
      dtype=None,
      keepdims=keepdims,
      promote_int=None,
      tf_bool_fn=math_ops.reduce_any,
      preserve_bool=True,
  )


@tf_export.tf_export('experimental.numpy.amin', v1=[])
@np_utils.np_doc('amin', unsupported_params=['out'])
def amin(a, axis=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # np.min preserves bool dtype, hence `preserve_bool=True` with reduce_all.
  return _reduce(
      math_ops.reduce_min,
      a,
      axis=axis,
      dtype=None,
      keepdims=keepdims,
      promote_int=None,
      tf_bool_fn=math_ops.reduce_all,
      preserve_bool=True,
  )
704
+
705
+
706
@tf_export.tf_export('experimental.numpy.var', v1=[])
@np_utils.np_doc('var')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None):  # pylint: disable=missing-docstring
  """Variance along `axis`, with optional `ddof` (delta degrees of freedom)."""
  if dtype:
    # Compute in the promoted dtype, then cast back to `dtype` at the end.
    working_dtype = np_utils.result_type(a, dtype)
  else:
    working_dtype = None
  if out is not None:
    raise ValueError('Setting out is not supported.')
  if ddof != 0:
    # TF reduce_variance doesn't support ddof, so calculate it using raw ops.
    # NOTE: this closure captures `dtype` and `ddof` from the enclosing scope.
    def reduce_fn(input_tensor, axis, keepdims):
      means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True)
      centered = input_tensor - means
      if input_tensor.dtype in (dtypes.complex64, dtypes.complex128):
        # Complex variance uses |x - mean|^2, which is real-valued.
        centered = math_ops.cast(
            math_ops.real(centered * math_ops.conj(centered)),
            input_tensor.dtype,
        )
      else:
        centered = math_ops.square(centered)
      squared_deviations = math_ops.reduce_sum(
          centered, axis=axis, keepdims=keepdims
      )

      # Count of contributing elements, reduced by ddof.
      if axis is None:
        n = array_ops.size(input_tensor)
      else:
        if axis < 0:
          axis += array_ops.rank(input_tensor)
        n = math_ops.reduce_prod(
            array_ops.gather(array_ops.shape(input_tensor), axis)
        )
      n = math_ops.cast(n - ddof, input_tensor.dtype)

      return math_ops.cast(math_ops.divide(squared_deviations, n), dtype)

  else:
    reduce_fn = math_ops.reduce_variance

  result = _reduce(
      reduce_fn,
      a,
      axis=axis,
      dtype=working_dtype,
      keepdims=keepdims,
      promote_int=_TO_FLOAT,
  )
  if dtype:
    result = math_ops.cast(result, dtype)
  return result
757
+
758
+
759
@tf_export.tf_export('experimental.numpy.std', v1=[])
@np_utils.np_doc('std')
def std(a, axis=None, keepdims=None):  # pylint: disable=missing-function-docstring
  # Integer/bool inputs are promoted to float, matching np.std.
  return _reduce(
      math_ops.reduce_std,
      a,
      axis=axis,
      dtype=None,
      keepdims=keepdims,
      promote_int=_TO_FLOAT,
  )
770
+
771
+
772
@tf_export.tf_export('experimental.numpy.ravel', v1=[])
@np_utils.np_doc('ravel')
def ravel(a):  # pylint: disable=missing-docstring
  """Collapses `a` into a 1-d array."""
  return array_ops.reshape(asarray(a), [-1])
777
+
778
+
779
@tf_export.tf_export('experimental.numpy.real', v1=[])
@np_utils.np_doc('real')
def real(val):
  """Returns the real part of `val`."""
  # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we
  # always return an ndarray.
  return math_ops.real(asarray(val))
786
+
787
+
788
@tf_export.tf_export('experimental.numpy.repeat', v1=[])
@np_utils.np_doc('repeat')
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  """Repeats elements of `a` along `axis` (flattened when axis is None)."""
  a = asarray(a)
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  known_shape = original_shape is not None and None not in original_shape
  if known_shape:
    if not original_shape:
      # Scalar input: the result is 1-d of length `repeats`.
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        # Single repeat count: every element/row is repeated the same number
        # of times, so the output shape is a simple multiple.
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        # Per-element repeat counts: the output length is their sum.
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()

  repeats = asarray(repeats)
  result = array_ops.repeat(a, repeats, axis)
  if known_shape:
    # Re-attach the statically computed shape lost by array_ops.repeat.
    result.set_shape(original_shape)

  return result
818
+
819
+
820
@tf_export.tf_export('experimental.numpy.around', v1=[])
@np_utils.np_doc('around')
def around(a, decimals=0):  # pylint: disable=missing-docstring
  """Rounds `a` to the given number of decimals (scale, round, unscale)."""
  a = asarray(a)
  dtype = a.dtype.as_numpy_dtype
  factor = math.pow(10, decimals)
  if np.issubdtype(dtype, np.inexact):
    factor = math_ops.cast(factor, dtype)
  else:
    # Use float as the working dtype when a.dtype is exact (e.g. integer),
    # because `decimals` can be negative.
    float_dtype = np_utils.result_type(float)
    a = a.astype(float_dtype)
    factor = math_ops.cast(factor, float_dtype)
  a = math_ops.multiply(a, factor)
  a = math_ops.round(a)
  a = math_ops.divide(a, factor)
  # Cast back to the original dtype (relevant for integer inputs).
  return a.astype(dtype)


# Makes `round(ndarray)` delegate to `around`.
setattr(np_arrays.ndarray, '__round__', around)
841
+
842
+
843
@tf_export.tf_export('experimental.numpy.reshape', v1=[])
@np_utils.np_doc('reshape')
def reshape(a, newshape, order='C'):
  """order argument can only be 'C' or 'F'."""
  if order not in {'C', 'F'}:
    raise ValueError('Unsupported order argument {}'.format(order))

  a = asarray(a)
  # A bare int is accepted as a 1-element shape, like np.reshape.
  if isinstance(newshape, int):
    newshape = [newshape]

  if order == 'F':
    # Fortran (column-major) order: reshape the transpose against the
    # reversed shape, then transpose back.
    r = array_ops.transpose(
        array_ops.reshape(array_ops.transpose(a), newshape[::-1])
    )
  else:
    r = array_ops.reshape(a, newshape)

  return r
862
+
863
+
864
def _reshape_method_wrapper(a, *newshape, **kwargs):
  """Backs the ndarray `.reshape` method, accepting shape as varargs."""
  order = kwargs.pop('order', 'C')
  if kwargs:
    raise ValueError('Unsupported arguments: {}'.format(kwargs.keys()))

  # Support both `x.reshape(2, 3)` and `x.reshape((2, 3))` call styles.
  if len(newshape) == 1 and not isinstance(newshape[0], int):
    newshape = newshape[0]

  return reshape(a, newshape, order=order)
873
+
874
+
875
@tf_export.tf_export('experimental.numpy.expand_dims', v1=[])
@np_utils.np_doc('expand_dims')
def expand_dims(a, axis):
  """Inserts a new size-1 dimension at position `axis`."""
  return array_ops.expand_dims(asarray(a), axis=axis)
880
+
881
+
882
@tf_export.tf_export('experimental.numpy.squeeze', v1=[])
@np_utils.np_doc('squeeze')
def squeeze(a, axis=None):
  """Removes size-1 dimensions (all of them when `axis` is None)."""
  return array_ops.squeeze(asarray(a), axis)
887
+
888
+
889
@tf_export.tf_export('experimental.numpy.flatten', v1=[])
@np_utils.np_doc('flatten', link=np_utils.NoLink())
def flatten(a, order='C'):
  """Flattens to 1-d; 'C'/'A'/'K' read row major, 'F' reads column major."""
  a = asarray(a)
  if order == 'F':
    # Column major: transpose first so the reshape walks columns.
    return array_ops.reshape(array_ops.transpose(a), [-1])
  if order in ('C', 'A', 'K'):
    # All row-major variants collapse to a plain reshape.
    return array_ops.reshape(a, [-1])
  raise ValueError(
      'order can only be C, A, K (all row major) or F (column major).'
  )
903
+
904
+
905
@tf_export.tf_export('experimental.numpy.transpose', v1=[])
@np_utils.np_doc('transpose')
def transpose(a, axes=None):
  """Permutes dimensions of `a`; reverses them when `axes` is None."""
  perm = None if axes is None else asarray(axes)
  return array_ops.transpose(a=asarray(a), perm=perm)
912
+
913
+
914
@tf_export.tf_export('experimental.numpy.swapaxes', v1=[])
@np_utils.np_doc('swapaxes')
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  """Interchanges two axes of `a`."""
  a = asarray(a)

  def adjust_axes(axes, rank):
    # Normalizes negative axes to non-negative ones, handling both Python
    # ints (statically) and tensors (with a runtime select).
    def f(x):
      if isinstance(x, int):
        if x < 0:
          x = x + rank
      else:
        x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x)
      return x

    return nest.map_structure(f, axes)

  if (
      a.shape.rank is not None
      and isinstance(axis1, int)
      and isinstance(axis2, int)
  ):
    # This branch makes sure `perm` is statically known, to avoid a
    # not-compile-time-constant XLA error.
    a_rank = a.shape.rank
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = list(range(a_rank))
    perm[axis1] = axis2
    perm[axis2] = axis1
  else:
    # Dynamic rank/axes: build the permutation as a tensor and swap the two
    # entries with a scatter update.
    a_rank = array_ops.rank(a)
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = math_ops.range(a_rank)
    perm = array_ops.tensor_scatter_update(
        perm, [[axis1], [axis2]], [axis2, axis1]
    )
  a = array_ops.transpose(a, perm)
  return a
951
+
952
+
953
@tf_export.tf_export('experimental.numpy.moveaxis', v1=[])
@np_utils.np_doc('moveaxis')
def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
  """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
  if not source and not destination:
    return a

  a = asarray(a)

  # Accept bare ints as 1-tuples, like np.moveaxis.
  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)
  if len(source) != len(destination):
    raise ValueError('The lengths of source and destination must equal')

  # Rank as a Python int when statically known, else a tensor.
  a_rank = np_utils._maybe_static(array_ops.rank(a))  # pylint: disable=protected-access

  def _correct_axis(axis, rank):
    # Normalize a possibly negative axis.
    if axis < 0:
      return axis + rank
    return axis

  source = tuple(_correct_axis(axis, a_rank) for axis in source)
  destination = tuple(_correct_axis(axis, a_rank) for axis in destination)

  if a.shape.rank is not None:
    # Static rank: build the permutation in Python by removing the moved
    # axes and re-inserting them at their destinations in order.
    perm = [i for i in range(a_rank) if i not in source]
    for dest, src in sorted(zip(destination, source)):
      assert dest <= len(perm)
      perm.insert(dest, src)
  else:
    # Dynamic rank: construct the permutation tensor via scatter ops.
    r = math_ops.range(a_rank)

    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = array_ops_stack.unstack(
          sort_ops.sort(array_ops_stack.stack(b)), num=len(b)
      )

      i = 0
      result = []

      # Keep the gaps between the (sorted) removed indices.
      for item in items:
        result.append(a[i:item])
        i = item + 1

      result.append(a[i:])

      return array_ops.concat(result, 0)

    minus_sources = _remove_indices(r, source)
    minus_dest = _remove_indices(r, destination)

    # First fill the non-destination slots with the non-source axes, then
    # place each source axis at its destination.
    perm = array_ops.scatter_nd(
        array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank]
    )
    perm = array_ops.tensor_scatter_update(
        perm, array_ops.expand_dims(destination, 1), source
    )
  a = array_ops.transpose(a, perm)

  return a
1016
+
1017
+
1018
@tf_export.tf_export('experimental.numpy.pad', v1=[])
@np_utils.np_doc('pad')
def pad(array, pad_width, mode, **kwargs):  # pylint: disable=redefined-outer-name
  """Only supports modes 'constant', 'reflect' and 'symmetric' currently."""
  constant_values = kwargs.get('constant_values', 0)
  if mode not in ('constant', 'reflect', 'symmetric'):
    raise ValueError('Unsupported padding mode: ' + mode)
  # tf.pad expects the mode in upper case.
  return array_ops.pad(
      tensor=asarray(array),
      paddings=asarray(pad_width, dtype=dtypes.int32),
      mode=mode.upper(),
      constant_values=constant_values,
  )
1034
+
1035
+
1036
@tf_export.tf_export('experimental.numpy.take', v1=[])
@np_utils.np_doc('take')
def take(a, indices, axis=None, out=None, mode='clip'):
  """out argument is not supported, and default mode is clip."""
  if out is not None:
    raise ValueError('out argument is not supported in take.')

  if mode not in {'raise', 'clip', 'wrap'}:
    raise ValueError("Invalid mode '{}' for take".format(mode))

  a = asarray(a)
  indices = asarray(indices)

  # No axis means index into the flattened array.
  if axis is None:
    a = array_ops.reshape(a, [-1])
    axis = 0

  axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]
  if mode == 'clip':
    # Out-of-range indices are clamped to the valid range.
    indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)
  elif mode == 'wrap':
    # Out-of-range indices wrap around modulo the axis length.
    indices = math_ops.floormod(indices, axis_size)
  else:
    # 'raise' would require runtime bounds checks; not implemented.
    raise ValueError("The 'raise' mode to take is not supported.")

  return array_ops.gather(a, indices, axis=axis)
1062
+
1063
+
1064
@tf_export.tf_export('experimental.numpy.where', v1=[])
@np_utils.np_doc_only('where')
def where(condition, x=None, y=None):
  """Raises ValueError if exactly one of x or y is not None."""
  condition = asarray(condition, dtype=np.bool_)
  if x is None and y is None:
    # One-argument form behaves like np.nonzero.
    return nonzero(condition)
  if x is None or y is None:
    raise ValueError('Both x and y must be ndarrays, or both must be None.')
  x, y = _promote_dtype(x, y)
  return array_ops.where_v2(condition, x, y)
1075
+
1076
+
1077
@tf_export.tf_export('experimental.numpy.select', v1=[])
@np_utils.np_doc('select')
def select(condlist, choicelist, default=0):  # pylint: disable=missing-docstring
  """Picks, per element, the first choice whose condition holds, else default."""
  if len(condlist) != len(choicelist):
    msg = 'condlist must have length equal to choicelist ({} vs {})'
    raise ValueError(msg.format(len(condlist), len(choicelist)))
  if not condlist:
    raise ValueError('condlist must be non-empty')
  # Promote default and all choices to a common dtype.
  choices = _promote_dtype(default, *choicelist)
  choicelist = choices[1:]
  output = choices[0]
  # The traversal is in reverse order so we can return the first value in
  # choicelist where condlist is True.
  for cond, choice in zip(condlist[::-1], choicelist[::-1]):
    output = where(cond, choice, output)
  return output
1093
+
1094
+
1095
@tf_export.tf_export('experimental.numpy.shape', v1=[])
@np_utils.np_doc(
    'shape',
    link=np_utils.Link(
        'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html'
    ),
)
def shape(a):
  """Returns the shape of `a` after conversion to an ndarray."""
  return asarray(a).shape
1105
+
1106
+
1107
@tf_export.tf_export('experimental.numpy.ndim', v1=[])
@np_utils.np_doc('ndim', link=np_utils.NoLink())
def ndim(a):
  """Returns the number of dimensions of `a`."""
  return asarray(a).ndim
1112
+
1113
+
1114
@tf_export.tf_export('experimental.numpy.isscalar', v1=[])
@np_utils.np_doc('isscalar')
def isscalar(num):
  # A scalar is anything that converts to a 0-d ndarray.
  return ndim(num) == 0
1118
+
1119
+
1120
+ def _boundaries_to_sizes(a, boundaries, axis):
1121
+ """Converting boundaries of splits to sizes of splits.
1122
+
1123
+ Args:
1124
+ a: the array to be split.
1125
+ boundaries: the boundaries, as in np.split.
1126
+ axis: the axis along which to split.
1127
+
1128
+ Returns:
1129
+ A list of sizes of the splits, as in tf.split.
1130
+ """
1131
+ if axis >= len(a.shape):
1132
+ raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
1133
+ total_size = a.shape[axis]
1134
+ sizes = []
1135
+ sizes_sum = 0
1136
+ prev = 0
1137
+ for i, b in enumerate(boundaries):
1138
+ size = b - prev
1139
+ if size < 0:
1140
+ raise ValueError(
1141
+ 'The %s-th boundary %s is smaller than the previous boundary %s'
1142
+ % (i, b, prev)
1143
+ )
1144
+ size = builtins.min(size, builtins.max(0, total_size - sizes_sum))
1145
+ sizes.append(size)
1146
+ sizes_sum += size
1147
+ prev = b
1148
+ sizes.append(builtins.max(0, total_size - sizes_sum))
1149
+ return sizes
1150
+
1151
+
1152
@tf_export.tf_export('experimental.numpy.split', v1=[])
@np_utils.np_doc('split')
def split(ary, indices_or_sections, axis=0):
  """Splits `ary` along `axis`, by section count or by boundary indices."""
  ary = asarray(ary)
  if isinstance(indices_or_sections, int):
    splits = indices_or_sections
  else:
    # Boundary indices are translated into per-split sizes for tf.split.
    splits = _boundaries_to_sizes(ary, indices_or_sections, axis)
  return array_ops.split(ary, splits, axis=axis)
1159
+
1160
+
1161
def _split_on_axis(np_fun_name, axis):  # pylint: disable=missing-function-docstring
  # Factory for vsplit/hsplit/dsplit: returns a `split` pinned to `axis`,
  # with numpy-style docs attached via np_doc.
  @np_utils.np_doc(np_fun_name)
  def f(ary, indices_or_sections):
    # for 1-D array, hsplit becomes vsplit
    new_axis = np_utils.cond(
        math_ops.equal(axis, 1),
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            math_ops.equal(array_ops.rank(ary), 1), lambda: 0, lambda: axis
        ),
        lambda: axis,
    )
    if isinstance(indices_or_sections, int):
      ary_shape = ary.shape[new_axis]
      # Match numpy: an integer section count must divide the axis evenly.
      if ary_shape is not None and ary_shape % indices_or_sections:
        raise ValueError('array split does not result in an equal division')
    return split(ary, indices_or_sections, axis=new_axis)

  return f
1179
+
1180
+
1181
# Axis-specialized variants of `split`, exported under tf.experimental.numpy.
vsplit = tf_export.tf_export('experimental.numpy.vsplit', v1=[])(
    _split_on_axis('vsplit', axis=0)
)
hsplit = tf_export.tf_export('experimental.numpy.hsplit', v1=[])(
    _split_on_axis('hsplit', axis=1)
)
dsplit = tf_export.tf_export('experimental.numpy.dsplit', v1=[])(
    _split_on_axis('dsplit', axis=2)
)
1190
+
1191
+
1192
@tf_export.tf_export('experimental.numpy.broadcast_to', v1=[])
@np_utils.np_doc('broadcast_to')
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
  # `full` broadcasts its fill value to `shape`, which implements
  # broadcast_to when the fill value is an array.
  return full(shape, array)
1196
+
1197
+
1198
@tf_export.tf_export('experimental.numpy.stack', v1=[])
@np_utils.np_doc('stack')
def stack(arrays, axis=0):  # pylint: disable=missing-function-docstring
  # A single ndarray/Tensor is treated as already stacked along axis 0;
  # for another axis, just move axis 0 into position.
  if isinstance(arrays, (np_arrays.ndarray, tensor_lib.Tensor)):
    arrays = asarray(arrays)
    return arrays if axis == 0 else swapaxes(arrays, 0, axis)
  # Otherwise treat `arrays` as a sequence: promote to a common dtype and
  # stack along the requested axis.
  promoted = _promote_dtype(*arrays)  # pylint: disable=protected-access
  return asarray(array_ops_stack.stack(list(promoted), axis))
1212
+
1213
+
1214
@tf_export.tf_export('experimental.numpy.hstack', v1=[])
@np_utils.np_doc('hstack')
def hstack(tup):
  # Promote every input to at least 1-D and to a common dtype, then
  # concatenate: along axis 0 for rank-1 inputs, along axis 1 otherwise.
  promoted = _promote_dtype(*[atleast_1d(a) for a in tup])  # pylint: disable=protected-access
  arrays = list(promoted)
  rank = array_ops.rank(arrays[0])
  return np_utils.cond(
      math_ops.equal(rank, 1),
      lambda: array_ops.concat(arrays, axis=0),
      lambda: array_ops.concat(arrays, axis=1),
  )
1228
+
1229
+
1230
@tf_export.tf_export('experimental.numpy.vstack', v1=[])
@np_utils.np_doc('vstack')
def vstack(tup):
  # atleast_2d guarantees a leading row axis, so concat along axis 0
  # stacks the inputs vertically.
  promoted = _promote_dtype(*[atleast_2d(a) for a in tup])  # pylint: disable=protected-access
  return array_ops.concat(list(promoted), axis=0)
1239
+
1240
+
1241
@tf_export.tf_export('experimental.numpy.dstack', v1=[])
@np_utils.np_doc('dstack')
def dstack(tup):
  # atleast_3d guarantees a depth axis, so concat along axis 2 stacks
  # the inputs depthwise.
  promoted = _promote_dtype(*[atleast_3d(a) for a in tup])  # pylint: disable=protected-access
  return array_ops.concat(list(promoted), axis=2)
1250
+
1251
+
1252
def _pad_left_to(n, old_shape):
  """Left-pads `old_shape` with 1s so it has at least `n` entries."""
  old_shape = asarray(old_shape, dtype=np.int32)
  # Number of missing dimensions; zero when the shape is already long enough.
  missing = math_ops.maximum(n - array_ops.size(old_shape), 0)
  padded = array_ops.pad(old_shape, [[missing, 0]], constant_values=1)
  return asarray(padded)
1260
+
1261
+
1262
def _atleast_nd(n, new_shape, *arys):
  """Reshape arrays to be at least `n`-dimensional.

  Args:
    n: The minimal rank.
    new_shape: a function that takes `n` and the old shape and returns the
      desired new shape.
    *arys: ndarray(s) to be reshaped.

  Returns:
    The reshaped array(s).
  """

  def _fix(x):
    # pylint: disable=g-long-lambda
    x = asarray(x)
    # Only reshape when the current rank falls short of `n`.
    return asarray(
        np_utils.cond(
            np_utils.greater(n, array_ops.rank(x)),
            lambda: reshape(x, new_shape(n, array_ops.shape(x))),
            lambda: x,
        )
    )

  results = [_fix(x) for x in arys]
  # Mirror numpy: a single input yields a single array, not a list.
  return results[0] if len(results) == 1 else results
1291
+
1292
+
1293
@tf_export.tf_export('experimental.numpy.atleast_1d', v1=[])
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
  # Left-pad shapes with 1s until rank >= 1.
  return _atleast_nd(1, _pad_left_to, *arys)
1297
+
1298
+
1299
@tf_export.tf_export('experimental.numpy.atleast_2d', v1=[])
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
  # Left-pad shapes with 1s until rank >= 2.
  return _atleast_nd(2, _pad_left_to, *arys)
1303
+
1304
+
1305
@tf_export.tf_export('experimental.numpy.atleast_3d', v1=[])
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys):  # pylint: disable=missing-docstring
  # numpy's atleast_3d axis placement: scalar -> (1, 1, 1), vector (N,) ->
  # (1, N, 1), matrix (M, N) -> (M, N, 1); higher ranks pass through.
  def new_shape(_, old_shape):
    # pylint: disable=g-long-lambda
    ndim_ = array_ops.size(old_shape)
    return np_utils.cond(
        math_ops.equal(ndim_, 0),
        lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
        lambda: np_utils.cond(
            math_ops.equal(ndim_, 1),
            lambda: array_ops.pad(old_shape, [[1, 1]], constant_values=1),
            lambda: array_ops.pad(old_shape, [[0, 1]], constant_values=1),
        ),
    )

  return _atleast_nd(3, new_shape, *arys)
1322
+
1323
+
1324
@tf_export.tf_export('experimental.numpy.nonzero', v1=[])
@np_utils.np_doc('nonzero')
def nonzero(a):
  a = atleast_1d(a)
  rank = a.shape.rank
  if rank is None:
    raise ValueError(
        "The rank of `a` is unknown, so we can't decide how many "
        'arrays to return.'
    )
  # where_v2 on a bool tensor yields an (num_nonzero, rank) index matrix;
  # unstacking its columns gives one 1-D index array per dimension.
  index_matrix = array_ops.where_v2(math_ops.cast(a, dtypes.bool))
  return array_ops_stack.unstack(index_matrix, rank, axis=1)
1336
+
1337
+
1338
@tf_export.tf_export('experimental.numpy.diag_indices', v1=[])
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2):  # pylint: disable=missing-docstring,redefined-outer-name
  if n < 0:
    raise ValueError(
        'n argument to diag_indices must be nonnegative, got {}'.format(n)
    )
  if ndim < 0:
    raise ValueError(
        'ndim argument to diag_indices must be nonnegative, got {}'.format(ndim)
    )

  # The main diagonal of an ndim-dimensional (n, ..., n) array is the index
  # vector [0, ..., n-1] repeated once per dimension.
  return (math_ops.range(n),) * ndim
1351
+
1352
+
1353
@tf_export.tf_export('experimental.numpy.tri', v1=[])
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None):  # pylint: disable=invalid-name,missing-docstring
  # An (N, M) matrix with ones at and below the k-th diagonal, zeros above.
  M = M if M is not None else N
  if dtype is not None:
    dtype = np_utils.result_type(dtype)
  else:
    # Use a default float type.
    dtype = np_utils.result_type(float)

  if k < 0:
    lower = -k - 1
    if lower > N:
      # The diagonal lies entirely below the matrix: nothing is kept.
      r = array_ops.zeros([N, M], dtype)
    else:
      # Keep as tf bool, since we create an upper triangular matrix and invert
      # it.
      o = array_ops.ones([N, M], dtype=dtypes.bool)
      r = math_ops.cast(
          math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype
      )
  else:
    o = array_ops.ones([N, M], dtype)
    if k > M:
      # The diagonal lies entirely above the matrix: everything is kept.
      r = o
    else:
      r = array_ops.matrix_band_part(o, -1, k)
  return r
1381
+
1382
+
1383
@tf_export.tf_export('experimental.numpy.tril', v1=[])
@np_utils.np_doc('tril')
def tril(m, k=0):  # pylint: disable=missing-docstring
  # Keep everything at and below the k-th diagonal, zero out the rest,
  # using a `tri` mask broadcast over any leading batch dimensions.
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to tril should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError(
        'Currently, the last two dimensions of the input array '
        'need to be known.'
    )

  zero = constant_op.constant(0, m.dtype)
  keep_mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return array_ops.where_v2(
      array_ops.broadcast_to(keep_mask, array_ops.shape(m)), m, zero
  )
1406
+
1407
+
1408
@tf_export.tf_export('experimental.numpy.triu', v1=[])
@np_utils.np_doc('triu')
def triu(m, k=0):  # pylint: disable=missing-docstring
  # Zero out everything strictly below the k-th diagonal: the `tri(k - 1)`
  # mask marks the region to clear.
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to triu should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError(
        'Currently, the last two dimensions of the input array '
        'need to be known.'
    )

  zero = constant_op.constant(0, m.dtype)
  clear_mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return array_ops.where_v2(
      array_ops.broadcast_to(clear_mask, array_ops.shape(m)), zero, m
  )
1431
+
1432
+
1433
@tf_export.tf_export('experimental.numpy.flip', v1=[])
@np_utils.np_doc('flip')
def flip(m, axis=None):  # pylint: disable=missing-docstring
  m = asarray(m)
  if axis is None:
    # No axis given: reverse along every axis.
    return array_ops.reverse(m, math_ops.range(array_ops.rank(m)))
  # Normalize a possibly-negative axis before reversing.
  canonical_axis = np_utils._canonicalize_axis(axis, array_ops.rank(m))  # pylint: disable=protected-access
  return array_ops.reverse(m, [canonical_axis])
1444
+
1445
+
1446
@tf_export.tf_export('experimental.numpy.flipud', v1=[])
@np_utils.np_doc('flipud')
def flipud(m):  # pylint: disable=missing-docstring
  # Up/down flip == reverse along axis 0.
  return flip(m, 0)
1450
+
1451
+
1452
@tf_export.tf_export('experimental.numpy.fliplr', v1=[])
@np_utils.np_doc('fliplr')
def fliplr(m):  # pylint: disable=missing-docstring
  # Left/right flip == reverse along axis 1.
  return flip(m, 1)
1456
+
1457
+
1458
@tf_export.tf_export('experimental.numpy.roll', v1=[])
@np_utils.np_doc('roll')
def roll(a, shift, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a)
  if axis is not None:
    return manip_ops.roll(a, shift, axis)
  # If axis is None, the roll happens as a 1-d tensor.
  original_shape = array_ops.shape(a)
  rolled_flat = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
  return array_ops.reshape(rolled_flat, original_shape)
1470
+
1471
+
1472
@tf_export.tf_export('experimental.numpy.rot90', v1=[])
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = array_ops.rank(m)
  ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access

  k = k % 4
  if k == 0:
    return m
  if k == 2:
    # A half-turn is a flip along both axes.
    return flip(flip(m, ax1), ax2)
  # A quarter-turn swaps the two axes (transpose) and flips one of them;
  # the flip side depends on the rotation direction.
  perm = math_ops.range(m_rank)
  perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
  if k == 1:
    return transpose(flip(m, ax2), perm)
  return flip(transpose(m, perm), ax2)
1491
+
1492
+
1493
@tf_export.tf_export('experimental.numpy.vander', v1=[])
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  x = asarray(x)

  x_shape = array_ops.shape(x)
  # numpy default: a square matrix, so N = len(x).
  if N is None:
    N = x_shape[0]

  # Validate N statically when its value is known, otherwise defer to a
  # runtime assert.
  N_temp = np_utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    control_flow_assert.Assert(N >= 0, [N])

  # Same static/dynamic split for the rank-1 requirement on x.
  rank = array_ops.rank(x)
  rank_temp = np_utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    control_flow_assert.Assert(math_ops.equal(rank, 1), [rank])

  # Column exponents run 0..N-1 when increasing, else N-1..0.
  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    start = N - 1
    limit = -1
    delta = -1

  # Broadcast x (as a column) against the exponent row to build the matrix.
  x = array_ops.expand_dims(x, -1)
  return math_ops.pow(
      x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype)
  )
1532
+
1533
+
1534
@tf_export.tf_export('experimental.numpy.ix_', v1=[])
@np_utils.np_doc('ix_')
def ix_(*args):  # pylint: disable=missing-docstring
  n = len(args)
  output = []
  for i, a in enumerate(args):
    a = asarray(a)
    # Check rank == 1 statically when known, otherwise via a runtime assert.
    a_rank = array_ops.rank(a)
    a_rank_temp = np_utils.get_static_value(a_rank)
    if a_rank_temp is not None:
      a_rank = a_rank_temp
      if a_rank != 1:
        raise ValueError(
            'Arguments must be 1-d, got arg {} of rank {}'.format(i, a_rank)
        )
    else:
      control_flow_assert.Assert(math_ops.equal(a_rank, 1), [a_rank])

    # Reshape the i-th input so it varies only along axis i; the outputs
    # then broadcast against each other to form an open mesh.
    new_shape = [1] * n
    new_shape[i] = -1
    dtype = a.dtype
    if dtype == dtypes.bool:
      # A boolean input acts as a mask: use the indices of its True entries.
      output.append(array_ops.reshape(nonzero(a)[0], new_shape))
    elif dtype.is_integer:
      output.append(array_ops.reshape(a, new_shape))
    else:
      raise ValueError(
          'Only integer and bool dtypes are supported, got {}'.format(dtype)
      )

  return output
1565
+
1566
+
1567
@tf_export.tf_export('experimental.numpy.broadcast_arrays', v1=[])
@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs):  # pylint: disable=missing-docstring
  # numpy's `subok` flag (ndarray subclasses) has no tf.numpy equivalent.
  if kwargs.pop('subok', False):
    raise ValueError('subok=True is not supported.')
  if kwargs:
    raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))

  return np_utils.tf_broadcast(*(asarray(arg) for arg in args))
1578
+
1579
+
1580
@tf_export.tf_export('experimental.numpy.sign', v1=[])
@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs):  # pylint: disable=missing-docstring,redefined-outer-name
  # numpy's out/where/extra ufunc kwargs are unsupported here.
  if out:
    raise ValueError('tf.numpy doesnt support setting out.')
  if where:
    raise ValueError('tf.numpy doesnt support setting where.')
  if kwargs:
    raise ValueError('tf.numpy doesnt support setting {}'.format(kwargs.keys()))

  x = asarray(x)
  np_dtype = x.dtype.as_numpy_dtype
  if np.issubdtype(np_dtype, np.complexfloating):
    # For complex input, take the sign of the real part and cast back.
    return math_ops.cast(math_ops.sign(math_ops.real(x)), np_dtype)
  return math_ops.sign(x)
1598
+
1599
+
1600
# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@tf_export.tf_export('experimental.numpy.take_along_axis', v1=[])
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis):  # pylint: disable=missing-docstring
  arr = asarray(arr)
  indices = asarray(indices)

  if axis is None:
    # numpy semantics: axis=None operates on the flattened array.
    return take_along_axis(arr.ravel(), indices, 0)

  rank = array_ops.rank(arr)
  axis = axis + rank if axis < 0 else axis

  # Broadcast shapes to match, ensure that the axis of interest is not
  # broadcast.
  arr_shape_original = array_ops.shape(arr, out_type=indices.dtype)
  indices_shape_original = array_ops.shape(indices, out_type=indices.dtype)
  arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
  indices_shape = array_ops.tensor_scatter_update(
      indices_shape_original, [[axis]], [1]
  )
  broadcasted_shape = array_ops.broadcast_dynamic_shape(
      arr_shape, indices_shape
  )
  # Restore each operand's own size along `axis` on top of the broadcast
  # result of the remaining dimensions.
  arr_shape = array_ops.tensor_scatter_update(
      broadcasted_shape, [[axis]], [arr_shape_original[axis]]
  )
  indices_shape = array_ops.tensor_scatter_update(
      broadcasted_shape, [[axis]], [indices_shape_original[axis]]
  )
  arr = array_ops.broadcast_to(arr, arr_shape)
  indices = array_ops.broadcast_to(indices, indices_shape)

  # Save indices shape so we can restore it later.
  possible_result_shape = indices.shape

  # Correct indices since gather doesn't correctly handle negative indices.
  indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)

  swapaxes_ = lambda t: swapaxes(t, axis, -1)

  # Move `axis` to the last position (unless it already is last) so that a
  # batched gather over the leading dimensions can be used.
  dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
  arr = np_utils.cond(
      dont_move_axis_to_end, lambda: arr, lambda: swapaxes_(arr)
  )
  indices = np_utils.cond(
      dont_move_axis_to_end, lambda: indices, lambda: swapaxes_(indices)
  )

  # Collapse the leading dimensions into one batch dimension.
  arr_shape = array_ops.shape(arr)
  arr = array_ops.reshape(arr, [-1, arr_shape[-1]])

  indices_shape = array_ops.shape(indices)
  indices = array_ops.reshape(indices, [-1, indices_shape[-1]])

  result = array_ops.gather(arr, indices, batch_dims=1)
  # Undo the reshape and (if performed) the axis swap.
  result = array_ops.reshape(result, indices_shape)
  result = np_utils.cond(
      dont_move_axis_to_end, lambda: result, lambda: swapaxes_(result)
  )
  result.set_shape(possible_result_shape)

  return result
1664
+
1665
+
1666
# pylint: disable=redefined-builtin,undefined-variable
@tf_export.tf_export('experimental.numpy.max', v1=[])
@np_utils.np_doc('max', link=np_utils.AliasOf('amax'))
def max(a, axis=None, keepdims=None):
  # np.max is an alias of np.amax; delegate accordingly.
  return amax(a, axis=axis, keepdims=keepdims)
1671
+
1672
+
1673
@tf_export.tf_export('experimental.numpy.min', v1=[])
@np_utils.np_doc('min', link=np_utils.AliasOf('amin'))
def min(a, axis=None, keepdims=None):
  # np.min is an alias of np.amin; delegate accordingly.
  return amin(a, axis=axis, keepdims=keepdims)
1677
+
1678
+
1679
@tf_export.tf_export('experimental.numpy.round', v1=[])
@np_utils.np_doc('round', link=np_utils.AliasOf('around'))
def round(a, decimals=0):
  # np.round is an alias of np.around; delegate accordingly.
  return around(a, decimals=decimals)
1683
+
1684
+
1685
# pylint: enable=redefined-builtin,undefined-variable


# Error text mirroring numpy's message for invalid index types.
_SLICE_ERROR = (
    'only integers, slices (`:`), ellipsis (`...`), '
    'numpy.newaxis (`None`) and integer or boolean arrays are valid indices'
)
1692
+
1693
+
1694
def _as_index(idx, need_scalar=True):
  """Helper function to parse idx as an index.

  Args:
    idx: index
    need_scalar: If idx needs to be a scalar value.

  Returns:
    A pair, (indx, bool). First one is the parsed index and can be a tensor,
    or scalar integer / Dimension. Second one is True if rank is known to be 0.

  Raises:
    IndexError: For incorrect indices.
  """
  # Plain Python ints / shape Dimensions pass through untouched.
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return idx, True
  data = asarray(idx)
  if data.dtype == dtypes.bool:
    if data.shape.ndims != 1:
      # TODO(agarwal): handle higher rank boolean masks.
      raise NotImplementedError('Need rank 1 for bool index %s' % idx)
    # A rank-1 bool mask selects positions: convert it to integer indices.
    data = array_ops.where_v2(data)
    data = array_ops.reshape(data, [-1])
  if need_scalar and data.shape.rank not in (None, 0):
    raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
  np_dtype = data.dtype.as_numpy_dtype
  if not np.issubdtype(np_dtype, np.integer):
    raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
  if data.dtype not in (dtypes.int64, dtypes.int32):
    # TF slicing can only handle int32/int64. So we need to cast.
    promoted_dtype = np.promote_types(np.int32, np_dtype)
    if promoted_dtype == np.int32:
      data = math_ops.cast(data, dtypes.int32)
    elif promoted_dtype == np.int64:
      data = math_ops.cast(data, dtypes.int64)
    else:
      raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
  return data, data.shape.rank == 0
1732
+
1733
+
1734
class _UpdateMethod(enum.Enum):
  """How `_slice_helper` combines `updates` with the selected elements."""

  UPDATE = 0  # Overwrite.
  ADD = 1  # Add to the existing values.
  MIN = 2  # Elementwise minimum.
  MAX = 3  # Elementwise maximum.
1739
+
1740
+
1741
def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
  """Helper function for __getitem__ and _with_index_update_helper.

  This function collects the indices in `slice_spec` into two buckets, which we
  can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
  `gather`. They also correspond to "basic indices" and "advanced indices" in
  numpy. This function supports both reading and writing at the indices. The
  reading path can be summarized as `gather(stride_slice(tensor, idx1),
  idx2)`. The writing path can be summarized as `strided_slice_update(tensor,
  idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here
  means `tf.gather` or `tf.gather_nd`; `scatter` here means
  `tf.tensor_scatter_update`.) The writing path is inefficient because it needs
  to first read out a portion (probably much larger than `updates`) of `tensor`
  using `strided_slice`, update it, and then write the portion back. An
  alternative approach is to only use `scatter`, which amounts to using the
  indexing mechanism of gather/scatter to implement
  strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
  because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
  not TF gather/scatter because they don't support spans (except those that
  cover entire dimensions, i.e. `:`). If we materialize spans into individual
  indices, the size of the index tensor would explode. (Note that XLA
  Gather/Scatter have a similar problem for stride > 1 because they don't
  support strides. Indices such as `1:2:8` will need to be materialized into
  individual indices such as [1, 3, 5, 7].)

  Args:
    tensor: the tensor to be read from or write into.
    slice_spec: the indices.
    update_method: (optional) a member of `_UpdateMethod`, indicating how to
      update the values (replacement, add, etc.). `None` indicates just reading.
    updates: (optional) the new values to write into `tensor`. It must have the
      same dtype as `tensor`.

  Returns:
    The result of reading (if `update_method` is `None`) or the updated `tensor`
    after writing.
  """
  begin, end, strides = [], [], []
  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  advanced_indices = []
  shrink_indices = []
  # Pass 1: translate each slice_spec entry into strided_slice
  # begin/end/stride triples plus the corresponding bit masks. Non-scalar
  # integer/bool indices are collected as "advanced" indices for the
  # gather/scatter stage below.
  for index, s in enumerate(slice_spec):
    if isinstance(s, slice):
      if s.start is not None:
        begin.append(_as_index(s.start)[0])
      else:
        begin.append(0)
        begin_mask |= 1 << index
      if s.stop is not None:
        end.append(_as_index(s.stop)[0])
      else:
        end.append(0)
        end_mask |= 1 << index
      if s.step is not None:
        strides.append(_as_index(s.step)[0])
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= 1 << index
    elif s is array_ops.newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= 1 << index
    else:
      s, is_scalar = _as_index(s, False)
      if is_scalar:
        # A scalar index removes that dimension (shrink).
        begin.append(s)
        end.append(s + 1)
        strides.append(1)
        shrink_axis_mask |= 1 << index
        shrink_indices.append(index)
      else:
        # An array index: take the whole dimension here and defer the
        # selection to gather/scatter.
        begin.append(0)
        end.append(0)
        strides.append(1)
        begin_mask |= 1 << index
        end_mask |= 1 << index
        # Whether this advanced index appeared after an ellipsis (affects
        # how its dimension number is computed below).
        advanced_indices.append((index, s, ellipsis_mask != 0))

  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(
      None,
      'strided_slice',
      [tensor] + begin + end + strides,
      skip_on_eager=False,
  ) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (
          array_ops_stack.stack(begin),
          array_ops_stack.stack(end),
          array_ops_stack.stack(strides),
      )
      # strided_slice requires begin/end/strides to share one dtype: widen
      # everything to int64 if any of them is int64.
      if (
          packed_begin.dtype == dtypes.int64
          or packed_end.dtype == dtypes.int64
          or packed_strides.dtype == dtypes.int64
      ):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = math_ops.cast(packed_strides, dtypes.int64)
    else:
      var_empty = constant_op.constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    # Fast path: a pure-replacement write with only basic indices maps
    # directly onto a single strided-slice update.
    if update_method == _UpdateMethod.UPDATE and not advanced_indices:
      return array_ops.tensor_strided_slice_update(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          updates,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name,
      )
    else:
      # TODO(b/164251540): Find a better way to support update that does not
      # involve one read + two writes.
      if updates is not None:
        original_tensor = tensor
      # TODO(agarwal): set_shape on tensor to set rank.
      tensor = array_ops.strided_slice(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name,
      )
    if not advanced_indices:
      if update_method is None:
        return tensor
      assert update_method != _UpdateMethod.UPDATE
      # TF lacks TensorStridedSliceAdd and alike, so we need to do
      # read+add+update.
      if update_method == _UpdateMethod.ADD:
        update_op = math_ops.add
      elif update_method == _UpdateMethod.MIN:
        update_op = math_ops.minimum
      elif update_method == _UpdateMethod.MAX:
        update_op = math_ops.maximum
      return array_ops.tensor_strided_slice_update(
          original_tensor,
          packed_begin,
          packed_end,
          packed_strides,
          update_op(tensor, updates),
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name + '_2',
      )
    # Map each advanced index from its position in slice_spec to the
    # dimension it addresses in the sliced tensor, accounting for dimensions
    # removed by scalar (shrink) indices; indices after an ellipsis are
    # counted from the end (negative dims).
    advanced_indices_map = {}
    for index, data, had_ellipsis in advanced_indices:
      if had_ellipsis:
        num_shrink = len([x for x in shrink_indices if x > index])
        dim = index - len(slice_spec) + num_shrink
      else:
        num_shrink = len([x for x in shrink_indices if x < index])
        dim = index - num_shrink
      advanced_indices_map[dim] = data
    dims = sorted(advanced_indices_map.keys())
    dims_contiguous = True
    if len(dims) > 1:
      if dims[0] < 0 and dims[-1] >= 0:  # not all same sign
        dims_contiguous = False
      else:
        for i in range(len(dims) - 1):
          if dims[i] + 1 != dims[i + 1]:
            dims_contiguous = False
            break
    indices = [advanced_indices_map[x] for x in dims]
    indices = _promote_dtype(*indices)
    indices = np_utils.tf_broadcast(*indices)
    stacked_indices = array_ops_stack.stack(indices, axis=-1)
    # Skip the contiguous-dims optimization for update because there is no
    # tf.*scatter* op that supports the `axis` argument.
    if not dims_contiguous or updates is not None:
      # NOTE(review): in Python 3 `range(...) != dims` is always True (a
      # range never compares equal to a list), so this moveaxis always runs;
      # harmless when dims are already leading, but likely intended
      # `list(range(len(dims))) != dims`.
      if range(len(dims)) != dims:
        tensor = moveaxis(tensor, dims, range(len(dims)))
      tensor_shape_prefix = array_ops.shape(
          tensor, out_type=stacked_indices.dtype
      )[: len(dims)]
      # Normalize negative indices against the sizes of the indexed dims.
      stacked_indices = array_ops.where_v2(
          stacked_indices < 0,
          stacked_indices + tensor_shape_prefix,
          stacked_indices,
      )
      if updates is None:
        return array_ops.gather_nd(tensor, stacked_indices)
      else:
        # We only need to move-axis `updates` in the contiguous case becausce
        # only in this case the result dimensions of advanced indexing are in
        # the middle of `updates`. In the non-contiguous case, those dimensions
        # are always at the front.
        if dims_contiguous:
          # TODO(wangpeng): Support unknown rank (e.g. by partially flattening
          # `updates`)
          if stacked_indices.shape.rank is None:
            raise NotImplementedError(
                'Rank of the advanced indices must currently be known'
            )
          batch_size = stacked_indices.shape.rank - 1
          batch_start = dims[0]
          if batch_start < 0:
            batch_start += len(dims) - batch_size

          def range_(start, length):
            return range(start, start + length)

          updates = moveaxis(
              updates, range_(batch_start, batch_size), range(batch_size)
          )
        if update_method == _UpdateMethod.UPDATE:
          update_op = array_ops.tensor_scatter_update
        elif update_method == _UpdateMethod.ADD:
          update_op = array_ops.tensor_scatter_add
        elif update_method == _UpdateMethod.MIN:
          update_op = array_ops.tensor_scatter_min
        elif update_method == _UpdateMethod.MAX:
          update_op = array_ops.tensor_scatter_max
        tensor = update_op(tensor, stacked_indices, updates)
        # NOTE(review): same always-True `range(...) != dims` comparison as
        # above; the moveaxis restores the original dimension order.
        if range(len(dims)) != dims:
          tensor = moveaxis(tensor, range(len(dims)), dims)
        return array_ops.tensor_strided_slice_update(
            original_tensor,
            packed_begin,
            packed_end,
            packed_strides,
            tensor,
            begin_mask=begin_mask,
            end_mask=end_mask,
            shrink_axis_mask=shrink_axis_mask,
            new_axis_mask=new_axis_mask,
            ellipsis_mask=ellipsis_mask,
            name=name + '_2',
        )
    # Note that gather_nd does not support gathering from inside the array.
    # To avoid shuffling data back and forth, we transform the indices and
    # do a gather instead.
    rank = np_utils._maybe_static(array_ops.rank(tensor))  # pylint: disable=protected-access
    dims = [(x + rank if x < 0 else x) for x in dims]
    shape_tensor = array_ops.shape(tensor)
    dim_sizes = array_ops.gather(shape_tensor, dims)
    if len(dims) == 1:
      stacked_indices = indices[0]
    stacked_indices = math_ops.cast(stacked_indices, dtypes.int32)
    # Normalize negative indices.
    stacked_indices = array_ops.where_v2(
        stacked_indices < 0, stacked_indices + dim_sizes, stacked_indices
    )
    axis = dims[0]
    if len(dims) > 1:
      # Flatten the contiguous advanced dims into one and linearize the
      # multi-dimensional indices accordingly (row-major scaling).
      index_scaling = math_ops.cumprod(dim_sizes, reverse=True, exclusive=True)

      def _tensordot(a, b):
        # TODO(b/168657656): This function should be replaced by
        # tensordot(axis=1) once MatMul has int32 XLA kernel.
        b = array_ops.broadcast_to(b, array_ops.shape(a))
        return math_ops.reduce_sum(a * b, axis=-1)

      stacked_indices = _tensordot(stacked_indices, index_scaling)
      flat_shape = array_ops.concat(
          [shape_tensor[:axis], [-1], shape_tensor[axis + len(dims) :]], axis=0
      )
      tensor = array_ops.reshape(tensor, flat_shape)

    return array_ops.gather(tensor, stacked_indices, axis=axis)
2025
+
2026
+
2027
def _as_spec_tuple(slice_spec):
  """Convert slice_spec to tuple."""
  if isinstance(slice_spec, (list, tuple)) and not isinstance(
      slice_spec, np.ndarray
  ):
    # A list/tuple of plain scalars is one multi-dimensional index and gets
    # wrapped as a single spec element; anything containing slices, Ellipsis,
    # None or non-scalar arrays is a per-dimension spec and converts as-is.
    def _is_scalar_entry(s):
      if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):
        return False
      if isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:
        return False
      return True

    if not all(_is_scalar_entry(s) for s in slice_spec):
      return tuple(slice_spec)
  return (slice_spec,)
2043
+
2044
+
2045
def _getitem(self, slice_spec):
  """Implementation of ndarray.__getitem__."""
  # A boolean scalar/array index is numpy-style boolean masking, handled by
  # boolean_mask instead of positional slicing.
  if (
      isinstance(slice_spec, bool)
      or (
          isinstance(slice_spec, core_tf_types.Tensor)
          and slice_spec.dtype == dtypes.bool
      )
      or (
          isinstance(slice_spec, (np.ndarray, np_arrays.ndarray))
          and slice_spec.dtype == np.bool_
      )
  ):
    return array_ops.boolean_mask(tensor=self, mask=slice_spec)

  # Normalize a non-tuple spec into the canonical tuple form.
  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)

  result_t = _slice_helper(self, slice_spec)
  return result_t
2065
+
2066
+
2067
def _with_index_update_helper(update_method, a, slice_spec, updates):
  """Implementation of ndarray._with_index_*."""
  # A boolean mask is first converted to integer index arrays (one per
  # dimension) via nonzero.
  if (
      isinstance(slice_spec, bool)
      or (
          isinstance(slice_spec, core_tf_types.Tensor)
          and slice_spec.dtype == dtypes.bool
      )
      or (
          isinstance(slice_spec, (np.ndarray, np_arrays.ndarray))
          and slice_spec.dtype == np.bool_
      )
  ):
    slice_spec = nonzero(slice_spec)

  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)

  # Promote `a` and `updates` to a common dtype for the update, then cast the
  # result back to `a`'s original dtype.
  a_dtype = a.dtype
  a, updates = _promote_dtype_binary(a, updates)
  result_t = _slice_helper(a, slice_spec, update_method, updates)
  return result_t.astype(a_dtype)
2089
+
2090
+
2091
# Attach numpy-style indexing (read) and index-update (write) methods to the
# ndarray class; the _with_index_* variants differ only in the update method
# bound via functools.partial.
setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem)
setattr(
    np_arrays.ndarray,
    '_with_index_update',
    functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE),
)
setattr(
    np_arrays.ndarray,
    '_with_index_add',
    functools.partial(_with_index_update_helper, _UpdateMethod.ADD),
)
setattr(
    np_arrays.ndarray,
    '_with_index_min',
    functools.partial(_with_index_update_helper, _UpdateMethod.MIN),
)
setattr(
    np_arrays.ndarray,
    '_with_index_max',
    functools.partial(_with_index_update_helper, _UpdateMethod.MAX),
)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_arrays.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """ndarray class."""
16
+
17
+ # pylint: disable=g-direct-tensorflow-import
18
+
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.framework import tensor
21
+ from tensorflow.python.framework import tensor_conversion
22
+ from tensorflow.python.ops.numpy_ops import np_dtypes
23
+
24
+
25
def convert_to_tensor(value, dtype=None, dtype_hint=None):
  """Wrapper over `tf.convert_to_tensor`.

  A safer version of `tf.convert_to_tensor` that works around b/149876037.

  Args:
    value: value to convert
    dtype: (optional) the type we would like it to be converted to.
    dtype_hint: (optional) soft preference for the type we would like it to be
      converted to. `tf.convert_to_tensor` will attempt to convert value to
      this type first, but will not fail if conversion is not possible falling
      back to inferring the type instead.

  Returns:
    Value converted to tf.Tensor.
  """
  # TODO(wangpeng): Remove this function once the bug is fixed.
  if dtype is None:
    if isinstance(value, int) and value >= 2**63:
      # Python ints beyond int64 range must be converted as uint64 explicitly.
      dtype = dtypes.uint64
    elif dtype_hint is None and isinstance(value, float):
      # Python floats follow the configurable tf-numpy default float type,
      # but only when the caller expressed no dtype preference at all.
      dtype = np_dtypes.default_float_type()
  return tensor_conversion.convert_to_tensor_v2_with_dispatch(
      value, dtype=dtype, dtype_hint=dtype_hint)
48
+
49
+
50
+ ndarray = tensor.Tensor
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_config.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Config functions for TF NumPy."""
16
+
17
+ from tensorflow.python.framework import ops
18
+ from tensorflow.python.ops import weak_tensor_ops # pylint: disable=unused-import
19
+ from tensorflow.python.ops.numpy_ops import np_dtypes
20
+ from tensorflow.python.ops.numpy_ops import np_math_ops
21
+ from tensorflow.python.platform import tf_logging
22
+ from tensorflow.python.util import tf_export
23
+
24
+
25
@tf_export.tf_export(
    "experimental.numpy.experimental_enable_numpy_behavior", v1=[]
)
def enable_numpy_behavior(prefer_float32=False, dtype_conversion_mode="legacy"):
  """Enable NumPy behavior on Tensors.

  Enabling NumPy behavior has three effects:
  * It adds to `tf.Tensor` some common NumPy methods such as `T`,
    `reshape` and `ravel`.
  * It changes dtype promotion in `tf.Tensor` operators to be
    compatible with NumPy. For example,
    `tf.ones([], tf.int32) + tf.ones([], tf.float32)` used to throw a
    "dtype incompatible" error, but after this it will return a
    float64 tensor (obeying NumPy's promotion rules).
  * It enhances `tf.Tensor`'s indexing capability to be on par with
    [NumPy's](https://numpy.org/doc/stable/reference/arrays.indexing.html).

  Args:
    prefer_float32: Controls whether dtype inference will use float32 for
      Python floats, or float64 (the default and the NumPy-compatible
      behavior).
    dtype_conversion_mode: a string that specifies promotion mode. This string
      corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe', or
      'all'. 'safe' or 'all' mode enables the auto dtype conversion semantics.
  """
  # The new promotion lattice must be installed before any other TF API runs,
  # so warn the user up front when a non-legacy mode is requested.
  if dtype_conversion_mode in ("safe", "all"):
    tf_logging.warning(
        "UserWarning: enabling the new type promotion must happen at the"
        " beginning of the program. Please ensure no TF APIs have been used"
        " yet."
    )
  ops.set_dtype_conversion_mode(dtype_conversion_mode)
  ops.enable_numpy_style_slicing()
  np_math_ops.enable_numpy_methods_on_tensor()
  np_dtypes.set_prefer_float32(prefer_float32)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_dtypes.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Dtypes and dtype utilities."""
16
+
17
+ import numpy as np
18
+
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.util import tf_export
21
+
22
+
23
# We use numpy's dtypes instead of TF's, because the user expects to use them
# with numpy facilities such as `np.dtype(np.int64)` and
# `if x.dtype.type is np.int64`.
# Each alias is also registered as a constant under `tf.experimental.numpy.*`.
# NOTE(review): `np.bool_`-style scalar types are stable, but `np.complex_`,
# `np.float_`, `np.object_` (as used here), `np.string_` and `np.unicode_`
# were removed in NumPy 2.0 — this module presumably requires numpy<2; verify
# against the package's pinned dependency.
bool_ = np.bool_
tf_export.tf_export('experimental.numpy.bool_', v1=[]).export_constant(
    __name__, 'bool_'
)
complex_ = np.complex_
tf_export.tf_export('experimental.numpy.complex_', v1=[]).export_constant(
    __name__, 'complex_'
)
complex128 = np.complex128
tf_export.tf_export('experimental.numpy.complex128', v1=[]).export_constant(
    __name__, 'complex128'
)
complex64 = np.complex64
tf_export.tf_export('experimental.numpy.complex64', v1=[]).export_constant(
    __name__, 'complex64'
)
float_ = np.float_
tf_export.tf_export('experimental.numpy.float_', v1=[]).export_constant(
    __name__, 'float_'
)
float16 = np.float16
tf_export.tf_export('experimental.numpy.float16', v1=[]).export_constant(
    __name__, 'float16'
)
float32 = np.float32
tf_export.tf_export('experimental.numpy.float32', v1=[]).export_constant(
    __name__, 'float32'
)
float64 = np.float64
tf_export.tf_export('experimental.numpy.float64', v1=[]).export_constant(
    __name__, 'float64'
)
inexact = np.inexact
tf_export.tf_export('experimental.numpy.inexact', v1=[]).export_constant(
    __name__, 'inexact'
)
int_ = np.int_
tf_export.tf_export('experimental.numpy.int_', v1=[]).export_constant(
    __name__, 'int_'
)
int16 = np.int16
tf_export.tf_export('experimental.numpy.int16', v1=[]).export_constant(
    __name__, 'int16'
)
int32 = np.int32
tf_export.tf_export('experimental.numpy.int32', v1=[]).export_constant(
    __name__, 'int32'
)
int64 = np.int64
tf_export.tf_export('experimental.numpy.int64', v1=[]).export_constant(
    __name__, 'int64'
)
int8 = np.int8
tf_export.tf_export('experimental.numpy.int8', v1=[]).export_constant(
    __name__, 'int8'
)
object_ = np.object_
tf_export.tf_export('experimental.numpy.object_', v1=[]).export_constant(
    __name__, 'object_'
)
string_ = np.string_
tf_export.tf_export('experimental.numpy.string_', v1=[]).export_constant(
    __name__, 'string_'
)
uint16 = np.uint16
tf_export.tf_export('experimental.numpy.uint16', v1=[]).export_constant(
    __name__, 'uint16'
)
uint32 = np.uint32
tf_export.tf_export('experimental.numpy.uint32', v1=[]).export_constant(
    __name__, 'uint32'
)
uint64 = np.uint64
tf_export.tf_export('experimental.numpy.uint64', v1=[]).export_constant(
    __name__, 'uint64'
)
uint8 = np.uint8
tf_export.tf_export('experimental.numpy.uint8', v1=[]).export_constant(
    __name__, 'uint8'
)
unicode_ = np.unicode_
tf_export.tf_export('experimental.numpy.unicode_', v1=[]).export_constant(
    __name__, 'unicode_'
)


# Re-export numpy's integer-dtype introspection helper.
iinfo = np.iinfo
tf_export.tf_export('experimental.numpy.iinfo', v1=[]).export_constant(
    __name__, 'iinfo'
)


# Re-export numpy's dtype-hierarchy predicate unchanged.
issubdtype = tf_export.tf_export('experimental.numpy.issubdtype', v1=[])(
    np.issubdtype
)
121
+
122
+
123
# Narrowing map used by `canonicalize_dtype` when float64 is disallowed:
# 64-bit float/complex dtypes collapse to their 32-bit counterparts.
_to_float32 = {
    np.dtype('float64'): np.dtype('float32'),
    np.dtype('complex128'): np.dtype('complex64'),
}


# Memoization table for `_get_cached_dtype` (TF DType -> np.dtype).
_cached_np_dtypes = {}


# Difference between is_prefer_float32 and is_allow_float64: is_prefer_float32
# only decides which dtype to use for Python floats; is_allow_float64 decides
# whether float64 dtypes can ever appear in programs. The latter is more
# restrictive than the former.
_prefer_float32 = False


# TODO(b/178862061): Consider removing this knob
_allow_float64 = True
141
+
142
+
143
def is_prefer_float32():
  """Whether Python floats are inferred as float32 (instead of float64)."""
  return _prefer_float32
145
+
146
+
147
def set_prefer_float32(b):
  """Sets whether Python floats should be inferred as float32."""
  global _prefer_float32
  _prefer_float32 = b
150
+
151
+
152
def is_allow_float64():
  """Whether float64/complex128 dtypes may appear at all (see module note)."""
  return _allow_float64
154
+
155
+
156
def set_allow_float64(b):
  """Sets whether float64/complex128 dtypes are allowed."""
  global _allow_float64
  _allow_float64 = b
159
+
160
+
161
def canonicalize_dtype(dtype):
  """Narrows 64-bit float/complex dtypes to 32-bit when float64 is disallowed.

  Dtypes not listed in `_to_float32` (and all dtypes when float64 is allowed)
  pass through unchanged.
  """
  if _allow_float64:
    return dtype
  return _to_float32.get(dtype, dtype)
168
+
169
+
170
def _result_type(*arrays_and_dtypes):
  """Returns the resulting type given a set of arrays."""

  def _narrow_python_scalar(x):
    # Under prefer-float32, Python floats/complexes contribute 32-bit scalar
    # types to the promotion instead of NumPy's 64-bit defaults.
    if not is_prefer_float32():
      return x
    if isinstance(x, float):
      return np.float32(x)
    if isinstance(x, complex):
      return np.complex64(x)
    return x

  narrowed = [_narrow_python_scalar(x) for x in arrays_and_dtypes]
  # Delegate promotion to NumPy, then apply the float64 policy and wrap the
  # result as a TF DType.
  return dtypes.as_dtype(canonicalize_dtype(np.result_type(*narrowed)))
184
+
185
+
186
def _get_cached_dtype(dtype):
  """Returns an np.dtype for the TensorFlow DType, memoized per DType."""
  # `np.dtype(...)` construction is not free, so cache the mapping in the
  # module-level `_cached_np_dtypes` dict. Values are never None, so `get`
  # returning None reliably means "not cached yet".
  cached = _cached_np_dtypes.get(dtype)
  if cached is None:
    cached = np.dtype(dtype.as_numpy_dtype)
    _cached_np_dtypes[dtype] = cached
  return cached
196
+
197
+
198
def default_float_type():
  """Gets the default float type.

  Returns:
    If `is_prefer_float32()` is false and `is_allow_float64()` is true, returns
    float64; otherwise returns float32.
  """
  # float32 wins whenever the user prefers it or float64 is disallowed.
  if is_prefer_float32() or not is_allow_float64():
    return float32
  return float64
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_math_ops.py ADDED
@@ -0,0 +1,1642 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Mathematical operations."""
16
+ # pylint: disable=g-direct-tensorflow-import
17
+
18
+ import numbers
19
+ import sys
20
+
21
+ import numpy as np
22
+
23
+ from tensorflow.python.framework import constant_op
24
+ from tensorflow.python.framework import dtypes
25
+ from tensorflow.python.framework import errors
26
+ from tensorflow.python.framework import ops
27
+ from tensorflow.python.framework import tensor
28
+ from tensorflow.python.ops import array_ops
29
+ from tensorflow.python.ops import array_ops_stack
30
+ from tensorflow.python.ops import bitwise_ops
31
+ from tensorflow.python.ops import clip_ops
32
+ from tensorflow.python.ops import control_flow_assert
33
+ from tensorflow.python.ops import gen_math_ops
34
+ from tensorflow.python.ops import math_ops
35
+ from tensorflow.python.ops import nn_ops
36
+ from tensorflow.python.ops import sort_ops
37
+ from tensorflow.python.ops import special_math_ops
38
+ from tensorflow.python.ops import while_loop
39
+ from tensorflow.python.ops.numpy_ops import np_array_ops
40
+ from tensorflow.python.ops.numpy_ops import np_arrays
41
+ from tensorflow.python.ops.numpy_ops import np_dtypes
42
+ from tensorflow.python.ops.numpy_ops import np_utils
43
+ from tensorflow.python.util import tf_export
44
+
45
+
46
# Mathematical constants mirrored from NumPy and exported under
# `tf.experimental.numpy.*`.
pi = np.pi
tf_export.tf_export('experimental.numpy.pi', v1=[]).export_constant(
    __name__, 'pi'
)
e = np.e
tf_export.tf_export('experimental.numpy.e', v1=[]).export_constant(
    __name__, 'e'
)
inf = np.inf
tf_export.tf_export('experimental.numpy.inf', v1=[]).export_constant(
    __name__, 'inf'
)
58
+
59
+
60
@tf_export.tf_export('experimental.numpy.dot', v1=[])
@np_utils.np_doc_only('dot')
def dot(a, b):  # pylint: disable=missing-docstring
  def f(a, b):  # pylint: disable=missing-docstring
    # Mirrors np.dot's dispatch: if either operand is a scalar (rank 0),
    # multiply elementwise; if `b` is 1-D, contract the last axes of both;
    # otherwise contract `a`'s last axis with `b`'s second-to-last axis.
    # Ranks are tested with graph-compatible `np_utils.cond`.
    return np_utils.cond(
        np_utils.logical_or(
            math_ops.equal(array_ops.rank(a), 0),
            math_ops.equal(array_ops.rank(b), 0),
        ),
        lambda: a * b,
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            math_ops.equal(array_ops.rank(b), 1),
            lambda: math_ops.tensordot(a, b, axes=[[-1], [-1]]),
            lambda: math_ops.tensordot(a, b, axes=[[-1], [-2]]),
        ),
    )

  return _bin_op(f, a, b)
78
+
79
+
80
# TODO(wangpeng): Make element-wise ops `ufunc`s
def _bin_op(tf_fun, a, b, promote=True):
  """Applies binary `tf_fun` to `a` and `b` after dtype preparation.

  With `promote=True` (the default) both operands are promoted to a common
  dtype; otherwise each is merely converted to an array as-is.
  """
  if not promote:
    a = np_array_ops.array(a)
    b = np_array_ops.array(b)
  else:
    a, b = np_array_ops._promote_dtype_binary(a, b)  # pylint: disable=protected-access
  return tf_fun(a, b)
88
+
89
+
90
@tf_export.tf_export('experimental.numpy.add', v1=[])
@np_utils.np_doc('add')
def add(x1, x2):
  # NumPy semantics: `+` on booleans is logical OR, not arithmetic add.
  def _add_impl(a, b):
    if a.dtype != dtypes.bool:
      return math_ops.add(a, b)
    assert b.dtype == dtypes.bool
    return math_ops.logical_or(a, b)

  return _bin_op(_add_impl, x1, x2)
100
+
101
+
102
@tf_export.tf_export('experimental.numpy.subtract', v1=[])
@np_utils.np_doc('subtract')
def subtract(x1, x2):
  # Thin promotion wrapper over tf.math.subtract.
  return _bin_op(math_ops.subtract, x1, x2)
106
+
107
+
108
@tf_export.tf_export('experimental.numpy.multiply', v1=[])
@np_utils.np_doc('multiply')
def multiply(x1, x2):
  # NumPy semantics: `*` on booleans is logical AND, not arithmetic multiply.
  def _mul_impl(a, b):
    if a.dtype != dtypes.bool:
      return math_ops.multiply(a, b)
    assert b.dtype == dtypes.bool
    return math_ops.logical_and(a, b)

  return _bin_op(_mul_impl, x1, x2)
118
+
119
+
120
@tf_export.tf_export('experimental.numpy.true_divide', v1=[])
@np_utils.np_doc('true_divide')
def true_divide(x1, x2):  # pylint: disable=missing-function-docstring
  def _avoid_float64(x1, x2):
    # For matching int32/int64 operands, pre-cast to float32 so the division
    # result stays 32-bit.
    if x1.dtype == x2.dtype and x1.dtype in (dtypes.int32, dtypes.int64):
      x1 = math_ops.cast(x1, dtype=dtypes.float32)
      x2 = math_ops.cast(x2, dtype=dtypes.float32)
    return x1, x2

  def f(x1, x2):
    # Booleans are divided as floats (NumPy behavior); use the configured
    # default float type.
    if x1.dtype == dtypes.bool:
      assert x2.dtype == dtypes.bool
      float_ = np_utils.result_type(float)
      x1 = math_ops.cast(x1, float_)
      x2 = math_ops.cast(x2, float_)
    if not np_dtypes.is_allow_float64():
      # math_ops.truediv in Python3 produces float64 when both inputs are int32
      # or int64. We want to avoid that when is_allow_float64() is False.
      x1, x2 = _avoid_float64(x1, x2)
    return math_ops.truediv(x1, x2)

  return _bin_op(f, x1, x2)
142
+
143
+
144
@tf_export.tf_export('experimental.numpy.divide', v1=[])
@np_utils.np_doc('divide')
def divide(x1, x2):  # pylint: disable=missing-function-docstring
  # In NumPy, `divide` is the same as `true_divide`.
  return true_divide(x1, x2)
148
+
149
+
150
@tf_export.tf_export('experimental.numpy.floor_divide', v1=[])
@np_utils.np_doc('floor_divide')
def floor_divide(x1, x2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # tf.math.floordiv has no bool kernel; promote bools to int8 first
    # (matching NumPy's bool // bool returning an integer result).
    if x1.dtype == dtypes.bool:
      assert x2.dtype == dtypes.bool
      x1 = math_ops.cast(x1, dtypes.int8)
      x2 = math_ops.cast(x2, dtypes.int8)
    return math_ops.floordiv(x1, x2)

  return _bin_op(f, x1, x2)
161
+
162
+
163
@tf_export.tf_export('experimental.numpy.mod', v1=[])
@np_utils.np_doc('mod')
def mod(x1, x2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # tf.math.mod has no bool kernel; promote bools to int8 first.
    if x1.dtype == dtypes.bool:
      assert x2.dtype == dtypes.bool
      x1 = math_ops.cast(x1, dtypes.int8)
      x2 = math_ops.cast(x2, dtypes.int8)
    return math_ops.mod(x1, x2)

  return _bin_op(f, x1, x2)
174
+
175
+
176
@tf_export.tf_export('experimental.numpy.remainder', v1=[])
@np_utils.np_doc('remainder')
def remainder(x1, x2):  # pylint: disable=missing-function-docstring
  # In NumPy, `remainder` is an alias of `mod`.
  return mod(x1, x2)
180
+
181
+
182
@tf_export.tf_export('experimental.numpy.divmod', v1=[])
@np_utils.np_doc('divmod')
def divmod(x1, x2):  # pylint: disable=redefined-builtin
  # Returns the (quotient, remainder) pair, like the Python builtin.
  return floor_divide(x1, x2), mod(x1, x2)
186
+
187
+
188
@tf_export.tf_export('experimental.numpy.maximum', v1=[])
@np_utils.np_doc('maximum')
def maximum(x1, x2):  # pylint: disable=missing-function-docstring
  # Fast path for when maximum is used as relu.
  # The bool exclusions matter: `maximum(x, False)` must go through the
  # logical-or branch below, and relu is undefined for bool tensors.
  if (
      isinstance(x2, numbers.Real)
      and not isinstance(x2, bool)
      and x2 == 0
      and isinstance(x1, np_arrays.ndarray)
      and x1.dtype != dtypes.bool
  ):
    return nn_ops.relu(np_array_ops.asarray(x1))

  def max_or_or(x1, x2):
    # NumPy semantics: maximum of booleans is logical OR.
    if x1.dtype == dtypes.bool:
      assert x2.dtype == dtypes.bool
      return math_ops.logical_or(x1, x2)
    return math_ops.maximum(x1, x2)

  return _bin_op(max_or_or, x1, x2)
208
+
209
+
210
@tf_export.tf_export('experimental.numpy.minimum', v1=[])
@np_utils.np_doc('minimum')
def minimum(x1, x2):
  # NumPy semantics: minimum of booleans is logical AND.
  def _min_impl(a, b):
    if a.dtype != dtypes.bool:
      return math_ops.minimum(a, b)
    assert b.dtype == dtypes.bool
    return math_ops.logical_and(a, b)

  return _bin_op(_min_impl, x1, x2)
220
+
221
+
222
@tf_export.tf_export('experimental.numpy.clip', v1=[])
@np_utils.np_doc('clip')
def clip(a, a_min, a_max):  # pylint: disable=missing-docstring
  # At least one bound must be given; a single-sided clip degenerates into
  # an elementwise min/max.
  if a_min is None and a_max is None:
    raise ValueError('Not more than one of `a_min` and `a_max` may be `None`.')
  if a_min is None:
    return minimum(a, a_max)
  elif a_max is None:
    return maximum(a, a_min)
  else:
    # Both bounds present: promote all three to a common dtype and broadcast
    # before handing off to clip_by_value.
    a, a_min, a_max = np_array_ops._promote_dtype(a, a_min, a_max)  # pylint: disable=protected-access
    return clip_ops.clip_by_value(*np_utils.tf_broadcast(a, a_min, a_max))
234
+
235
+
236
@tf_export.tf_export('experimental.numpy.matmul', v1=[])
@np_utils.np_doc('matmul')
def matmul(x1, x2):  # pylint: disable=missing-docstring
  def f(x1, x2):
    try:
      if x1._rank() == 2 and x2._rank() == 2:  # pylint: disable=protected-access
        # Fast path for known ranks.
        return gen_math_ops.mat_mul(x1, x2)
      # General case mirrors np.matmul's rank-1 promotion rules: a 1-D `x2`
      # contracts the last axes; a 1-D `x1` contracts its only axis with
      # `x2`'s second-to-last; otherwise fall back to batched tf.matmul.
      return np_utils.cond(
          math_ops.equal(np_utils.tf_rank(x2), 1),
          lambda: math_ops.tensordot(x1, x2, axes=1),
          lambda: np_utils.cond(  # pylint: disable=g-long-lambda
              math_ops.equal(np_utils.tf_rank(x1), 1),
              lambda: math_ops.tensordot(  # pylint: disable=g-long-lambda
                  x1, x2, axes=[[0], [-2]]
              ),
              lambda: math_ops.matmul(x1, x2),
          ),
      )
    except errors.InvalidArgumentError as err:
      # NumPy raises ValueError for incompatible shapes; translate TF's
      # InvalidArgumentError while keeping the original traceback.
      raise ValueError(str(err)).with_traceback(sys.exc_info()[2])

  return _bin_op(f, x1, x2)
259
+
260
+
261
# Exported so it can be called from Tensor.__matmul__. NumPy's matmul handles
# batched matmul as well, so simply including promotion in TF's current
# __matmul__ implementation was not sufficient.
setattr(np_arrays.ndarray, '_matmul', matmul)
265
+
266
+
267
@tf_export.tf_export('experimental.numpy.tensordot', v1=[])
@np_utils.np_doc('tensordot')
def tensordot(a, b, axes=2):
  # Promotion wrapper over tf.tensordot; `axes` is captured by the lambda.
  return _bin_op(lambda a, b: math_ops.tensordot(a, b, axes=axes), a, b)
271
+
272
+
273
@tf_export.tf_export('experimental.numpy.inner', v1=[])
@np_utils.np_doc_only('inner')
def inner(a, b):  # pylint: disable=missing-function-docstring
  def f(a, b):
    # Scalar operands multiply elementwise; otherwise contract the last axis
    # of each operand (np.inner semantics).
    return np_utils.cond(
        np_utils.logical_or(
            math_ops.equal(array_ops.rank(a), 0),
            math_ops.equal(array_ops.rank(b), 0),
        ),
        lambda: a * b,
        lambda: math_ops.tensordot(a, b, axes=[[-1], [-1]]),
    )

  return _bin_op(f, a, b)
287
+
288
+
289
@tf_export.tf_export('experimental.numpy.cross', v1=[])
@np_utils.np_doc('cross')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):  # pylint: disable=missing-docstring
  def f(a, b):  # pylint: disable=missing-docstring
    # We can't assign to captured variable `axisa`, so make a new variable
    if axis is None:
      axis_a = axisa
      axis_b = axisb
      axis_c = axisc
    else:
      # `axis` overrides all three per-operand axis arguments.
      axis_a = axis
      axis_b = axis
      axis_c = axis
    # Normalize negative axes to non-negative positions.
    if axis_a < 0:
      axis_a = np_utils.add(axis_a, array_ops.rank(a))
    if axis_b < 0:
      axis_b = np_utils.add(axis_b, array_ops.rank(b))

    # Move the vector axis to the last position so the rest of the code can
    # always operate on the trailing dimension.
    def maybe_move_axis_to_last(a, axis):
      def move_axis_to_last(a, axis):
        return array_ops.transpose(
            a,
            array_ops.concat(
                [
                    math_ops.range(axis),
                    math_ops.range(axis + 1, array_ops.rank(a)),
                    [axis],
                ],
                axis=0,
            ),
        )

      return np_utils.cond(
          axis == np_utils.subtract(array_ops.rank(a), 1),
          lambda: a,
          lambda: move_axis_to_last(a, axis),
      )

    a = maybe_move_axis_to_last(a, axis_a)
    b = maybe_move_axis_to_last(b, axis_b)
    a_dim = np_utils.getitem(array_ops.shape(a), -1)
    b_dim = np_utils.getitem(array_ops.shape(b), -1)

    # 2-vectors are zero-padded to 3-vectors so a single 3-D cross-product
    # kernel can be used; the z-component is extracted afterwards if needed.
    def maybe_pad_0(a, size_of_last_dim):
      def pad_0(a):
        return array_ops.pad(
            a,
            array_ops.concat(
                [
                    array_ops.zeros([array_ops.rank(a) - 1, 2], dtypes.int32),
                    constant_op.constant([[0, 1]], dtypes.int32),
                ],
                axis=0,
            ),
        )

      return np_utils.cond(
          math_ops.equal(size_of_last_dim, 2), lambda: pad_0(a), lambda: a
      )

    a = maybe_pad_0(a, a_dim)
    b = maybe_pad_0(b, b_dim)
    c = math_ops.cross(*np_utils.tf_broadcast(a, b))
    if axis_c < 0:
      axis_c = np_utils.add(axis_c, array_ops.rank(c))

    # Inverse of move_axis_to_last: put the trailing result axis back where
    # the caller asked for it (`axisc`/`axis`).
    def move_last_to_axis(a, axis):
      r = array_ops.rank(a)
      return array_ops.transpose(
          a,
          array_ops.concat(
              [math_ops.range(axis), [r - 1], math_ops.range(axis, r - 1)],
              axis=0,
          ),
      )

    # If both inputs were 2-vectors, the result is the scalar z-component
    # (index 2 of the padded cross product), matching np.cross.
    c = np_utils.cond(
        (a_dim == 2) & (b_dim == 2),
        lambda: c[..., 2],
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            axis_c == np_utils.subtract(array_ops.rank(c), 1),
            lambda: c,
            lambda: move_last_to_axis(c, axis_c),
        ),
    )
    return c

  return _bin_op(f, a, b)
377
+
378
+
379
@tf_export.tf_export('experimental.numpy.vdot', v1=[])
@np_utils.np_doc_only('vdot')
def vdot(a, b):  # pylint: disable=missing-docstring
  # np.vdot flattens both operands to 1-D and, for complex inputs, conjugates
  # the first operand before the dot product.
  a, b = np_array_ops._promote_dtype(a, b)  # pylint: disable=protected-access
  a = np_array_ops.reshape(a, [-1])
  b = np_array_ops.reshape(b, [-1])
  if a.dtype in (np_dtypes.complex64, np_dtypes.complex128):
    a = conj(a)
  return dot(a, b)
388
+
389
+
390
@tf_export.tf_export('experimental.numpy.power', v1=[])
@np_utils.np_doc('power')
def power(x1, x2):
  # Thin promotion wrapper over tf.math.pow.
  return _bin_op(math_ops.pow, x1, x2)
394
+
395
+
396
@tf_export.tf_export('experimental.numpy.float_power', v1=[])
@np_utils.np_doc('float_power')
def float_power(x1, x2):
  # NOTE(review): np.float_power always promotes to at least float64; here it
  # simply delegates to `power`, so integer inputs stay integral — presumably
  # an accepted divergence from NumPy; verify against the np_doc contract.
  return power(x1, x2)
400
+
401
+
402
@tf_export.tf_export('experimental.numpy.arctan2', v1=[])
@np_utils.np_doc('arctan2')
def arctan2(x1, x2):
  # Thin promotion wrapper over tf.math.atan2.
  return _bin_op(math_ops.atan2, x1, x2)
406
+
407
+
408
@tf_export.tf_export('experimental.numpy.nextafter', v1=[])
@np_utils.np_doc('nextafter')
def nextafter(x1, x2):
  # Thin promotion wrapper over tf.math.nextafter.
  return _bin_op(math_ops.nextafter, x1, x2)
412
+
413
+
414
@tf_export.tf_export('experimental.numpy.heaviside', v1=[])
@np_utils.np_doc('heaviside')
def heaviside(x1, x2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # Step function: 0 where x1 < 0, 1 where x1 > 0, and x2 where x1 == 0.
    return array_ops.where_v2(
        x1 < 0,
        constant_op.constant(0, dtype=x2.dtype),
        array_ops.where_v2(x1 > 0, constant_op.constant(1, dtype=x2.dtype), x2),
    )

  y = _bin_op(f, x1, x2)
  # NumPy's heaviside always produces a floating result; cast non-inexact
  # outputs to the configured default float type.
  if not np.issubdtype(y.dtype.as_numpy_dtype, np.inexact):
    y = y.astype(np_utils.result_type(float))
  return y
428
+
429
+
430
@tf_export.tf_export('experimental.numpy.hypot', v1=[])
@np_utils.np_doc('hypot')
def hypot(x1, x2):
  # sqrt(x1**2 + x2**2). NOTE(review): unlike a fused hypot kernel, squaring
  # first can overflow for large inputs — presumably accepted here.
  return sqrt(square(x1) + square(x2))
434
+
435
+
436
@tf_export.tf_export('experimental.numpy.kron', v1=[])
@np_utils.np_doc('kron')
def kron(a, b):  # pylint: disable=missing-function-docstring
  # pylint: disable=protected-access,g-complex-comprehension
  a, b = np_array_ops._promote_dtype(a, b)
  # Left-pad the lower-rank operand's shape with 1s so both operands have
  # equal rank (np.kron semantics).
  t_a = np_utils.cond(
      a.shape.rank < b.shape.rank,
      lambda: np_array_ops.reshape(  # pylint: disable=g-long-lambda
          a, np_array_ops._pad_left_to(b.shape.rank, a.shape)
      ),
      lambda: a,
  )
  t_b = np_utils.cond(
      b.shape.rank < a.shape.rank,
      lambda: np_array_ops.reshape(  # pylint: disable=g-long-lambda
          b, np_array_ops._pad_left_to(a.shape.rank, b.shape)
      ),
      lambda: b,
  )

  # Interleave each operand's dims with 1s ([d0,1,d1,1,...] vs
  # [1,d0,1,d1,...]) so that broadcasting the product and flattening yields
  # the Kronecker block structure.
  def _make_shape(shape, prepend):
    ones = array_ops.ones_like(shape)
    if prepend:
      shapes = [ones, shape]
    else:
      shapes = [shape, ones]
    return array_ops.reshape(array_ops_stack.stack(shapes, axis=1), [-1])

  a_shape = array_ops.shape(t_a)
  b_shape = array_ops.shape(t_b)
  a_reshaped = np_array_ops.reshape(t_a, _make_shape(a_shape, False))
  b_reshaped = np_array_ops.reshape(t_b, _make_shape(b_shape, True))
  out_shape = a_shape * b_shape
  return np_array_ops.reshape(a_reshaped * b_reshaped, out_shape)
470
+
471
+
472
@tf_export.tf_export('experimental.numpy.outer', v1=[])
@np_utils.np_doc('outer')
def outer(a, b):
  def f(a, b):
    # Flatten both operands, then rely on broadcasting of a column against a
    # row to produce the outer-product matrix.
    return array_ops.reshape(a, [-1, 1]) * array_ops.reshape(b, [-1])

  return _bin_op(f, a, b)
479
+
480
+
481
# This can also be implemented via tf.reduce_logsumexp
@tf_export.tf_export('experimental.numpy.logaddexp', v1=[])
@np_utils.np_doc('logaddexp')
def logaddexp(x1, x2):
  # log(exp(x1) + exp(x2)), rearranged so exp never overflows:
  # max(x1, x2) + log1p(exp(-|x1 - x2|)).
  amax = maximum(x1, x2)
  delta = x1 - x2
  return np_array_ops.where(
      isnan(delta),
      x1 + x2,  # NaNs or infinities of the same sign.
      amax + log1p(exp(-abs(delta))),
  )


@tf_export.tf_export('experimental.numpy.logaddexp2', v1=[])
@np_utils.np_doc('logaddexp2')
def logaddexp2(x1, x2):
  # Base-2 analogue of logaddexp; log2(1 + y) == log1p(y) / ln(2).
  amax = maximum(x1, x2)
  delta = x1 - x2
  return np_array_ops.where(
      isnan(delta),
      x1 + x2,  # NaNs or infinities of the same sign.
      amax + log1p(exp2(-abs(delta))) / np.log(2),
  )


@tf_export.tf_export('experimental.numpy.polyval', v1=[])
@np_utils.np_doc('polyval')
def polyval(p, x):  # pylint: disable=missing-function-docstring
  def _eval(p, x):
    if p.shape.rank == 0:
      p = array_ops.reshape(p, [1])
    p = array_ops_stack.unstack(p)
    # TODO(wangpeng): Make tf version take a tensor for p instead of a list.
    y = math_ops.polyval(p, x)
    # If the polynomial is 0-order, numpy requires the result to be broadcast to
    # `x`'s shape.
    if len(p) == 1:
      y = array_ops.broadcast_to(y, x.shape)
    return y

  return _bin_op(_eval, p, x)
522
+
523
+
524
@tf_export.tf_export('experimental.numpy.isclose', v1=[])
@np_utils.np_doc('isclose')
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):  # pylint: disable=missing-docstring
  def f(a, b):  # pylint: disable=missing-docstring
    dtype = a.dtype
    if not np.issubdtype(dtype.as_numpy_dtype, np.inexact):
      # Exact dtypes (ints, bools): closeness degenerates to equality.
      return a == b
    # |a - b| <= atol + rtol * |b|, matching numpy's asymmetric definition.
    rtol_t = ops.convert_to_tensor(rtol, dtype.real_dtype)
    atol_t = ops.convert_to_tensor(atol, dtype.real_dtype)
    result = math_ops.abs(a - b) <= atol_t + rtol_t * math_ops.abs(b)
    if equal_nan:
      # Treat NaN as close to NaN when requested.
      result = result | (math_ops.is_nan(a) & math_ops.is_nan(b))
    return result

  return _bin_op(f, a, b)


@tf_export.tf_export('experimental.numpy.allclose', v1=[])
@np_utils.np_doc('allclose')
def allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
  # True iff every element pair is close; reduces the isclose mask.
  return np_array_ops.all(
      isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
  )
548
+
549
+
550
def _tf_gcd(x1, x2):  # pylint: disable=missing-function-docstring
  # Vectorized Euclid's algorithm: iterate (x1, x2) -> (x2, x1 mod x2)
  # elementwise until all x2 entries are zero; x1 then holds the gcd.

  def _gcd_cond_fn(_, x2):
    return math_ops.reduce_any(x2 != 0)

  def _gcd_body_fn(x1, x2):
    # math_ops.mod will raise an error when any element of x2 is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    x2_safe = array_ops.where_v2(x2 != 0, x2, constant_op.constant(1, x2.dtype))
    x1, x2 = (
        array_ops.where_v2(x2 != 0, x2, x1),
        array_ops.where_v2(
            x2 != 0,
            math_ops.mod(x1, x2_safe),
            constant_op.constant(0, x2.dtype),
        ),
    )
    # Keep the invariant x1 >= x2 for the next iteration.
    return (
        array_ops.where_v2(x1 < x2, x2, x1),
        array_ops.where_v2(x1 < x2, x1, x2),
    )

  if not np.issubdtype(
      x1.dtype.as_numpy_dtype, np.integer
  ) or not np.issubdtype(x2.dtype.as_numpy_dtype, np.integer):
    raise ValueError('Arguments to gcd must be integers.')
  # Broadcast both operands to their common shape before looping.
  shape = array_ops.broadcast_dynamic_shape(
      array_ops.shape(x1), array_ops.shape(x2)
  )
  x1 = array_ops.broadcast_to(x1, shape)
  x2 = array_ops.broadcast_to(x2, shape)
  value, _ = while_loop.while_loop(
      _gcd_cond_fn, _gcd_body_fn, (math_ops.abs(x1), math_ops.abs(x2))
  )
  return value


# Note that np.gcd may not be present in some supported versions of numpy.
@tf_export.tf_export('experimental.numpy.gcd', v1=[])
@np_utils.np_doc('gcd')
def gcd(x1, x2):
  return _bin_op(_tf_gcd, x1, x2)


# Note that np.lcm may not be present in some supported versions of numpy.
@tf_export.tf_export('experimental.numpy.lcm', v1=[])
@np_utils.np_doc('lcm')
def lcm(x1, x2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # lcm(a, b) = |a| * (|b| / gcd(a, b)), with lcm(0, 0) defined as 0.
    d = _tf_gcd(x1, x2)
    # Same as the `x2_safe` trick above
    d_safe = array_ops.where_v2(
        math_ops.equal(d, 0), constant_op.constant(1, d.dtype), d
    )
    x1 = math_ops.abs(x1)
    x2 = math_ops.abs(x2)
    return array_ops.where_v2(
        math_ops.equal(d, 0),
        constant_op.constant(0, d.dtype),
        x1 * (x2 // d_safe),
    )

  return _bin_op(f, x1, x2)
613
+
614
+
615
def _bitwise_binary_op(tf_fn, x1, x2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # TF bitwise kernels do not accept bool; route bools through int8 and
    # cast the result back so numpy's bool semantics are preserved.
    is_bool = x1.dtype == dtypes.bool
    if is_bool:
      assert x2.dtype == dtypes.bool
      x1 = math_ops.cast(x1, dtypes.int8)
      x2 = math_ops.cast(x2, dtypes.int8)
    r = tf_fn(x1, x2)
    return math_ops.cast(r, dtypes.bool) if is_bool else r

  return _bin_op(f, x1, x2)


@tf_export.tf_export('experimental.numpy.bitwise_and', v1=[])
@np_utils.np_doc('bitwise_and')
def bitwise_and(x1, x2):
  return _bitwise_binary_op(bitwise_ops.bitwise_and, x1, x2)


@tf_export.tf_export('experimental.numpy.bitwise_or', v1=[])
@np_utils.np_doc('bitwise_or')
def bitwise_or(x1, x2):
  return _bitwise_binary_op(bitwise_ops.bitwise_or, x1, x2)


@tf_export.tf_export('experimental.numpy.bitwise_xor', v1=[])
@np_utils.np_doc('bitwise_xor')
def bitwise_xor(x1, x2):
  return _bitwise_binary_op(bitwise_ops.bitwise_xor, x1, x2)


@tf_export.tf_export('experimental.numpy.bitwise_not', v1=[])
@np_utils.np_doc('bitwise_not', link=np_utils.AliasOf('invert'))
def bitwise_not(x):
  def f(x):
    # Logical not for bools; bitwise invert for integer dtypes.
    if x.dtype == dtypes.bool:
      return math_ops.logical_not(x)
    return bitwise_ops.invert(x)

  return _scalar(f, x)
657
+
658
+
659
def _scalar(tf_fn, x, promote_to_float=False):
  """Applies `tf_fn` elementwise to `x`, optionally promoting to float first.

  Args:
    tf_fn: function that takes a single Tensor argument.
    x: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `ops.convert_to_tensor`.
    promote_to_float: whether to cast the argument to a float dtype if it is not
      already.

  Returns:
    An ndarray with the same shape as `x`. The default output dtype is
    determined by `np_utils.result_type(float)`, unless x is an ndarray with a
    floating point type, in which case the output type is same as x.dtype.
  """
  x = np_array_ops.asarray(x)
  if promote_to_float and not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact):
    x = x.astype(np_utils.result_type(float))
  return tf_fn(x)


@tf_export.tf_export('experimental.numpy.log', v1=[])
@np_utils.np_doc('log')
def log(x):
  return _scalar(math_ops.log, x, True)


@tf_export.tf_export('experimental.numpy.exp', v1=[])
@np_utils.np_doc('exp')
def exp(x):
  return _scalar(math_ops.exp, x, True)


@tf_export.tf_export('experimental.numpy.sqrt', v1=[])
@np_utils.np_doc('sqrt')
def sqrt(x):
  return _scalar(math_ops.sqrt, x, True)


@tf_export.tf_export('experimental.numpy.abs', v1=[])
@np_utils.np_doc('abs', link=np_utils.AliasOf('absolute'))
def abs(x):  # pylint: disable=redefined-builtin
  return _scalar(math_ops.abs, x)


@tf_export.tf_export('experimental.numpy.absolute', v1=[])
@np_utils.np_doc('absolute')
def absolute(x):
  # Alias of the np-style `abs` above (which shadows the builtin).
  return abs(x)


@tf_export.tf_export('experimental.numpy.fabs', v1=[])
@np_utils.np_doc('fabs')
def fabs(x):
  return abs(x)


@tf_export.tf_export('experimental.numpy.ceil', v1=[])
@np_utils.np_doc('ceil')
def ceil(x):
  return _scalar(math_ops.ceil, x, True)


@tf_export.tf_export('experimental.numpy.floor', v1=[])
@np_utils.np_doc('floor')
def floor(x):
  return _scalar(math_ops.floor, x, True)


@tf_export.tf_export('experimental.numpy.conj', v1=[])
@np_utils.np_doc('conj')
def conj(x):
  return _scalar(math_ops.conj, x)


@tf_export.tf_export('experimental.numpy.negative', v1=[])
@np_utils.np_doc('negative')
def negative(x):
  return _scalar(math_ops.negative, x)


@tf_export.tf_export('experimental.numpy.reciprocal', v1=[])
@np_utils.np_doc('reciprocal')
def reciprocal(x):
  return _scalar(math_ops.reciprocal, x)
744
+
745
+
746
@tf_export.tf_export('experimental.numpy.signbit', v1=[])
@np_utils.np_doc('signbit')
def signbit(x):
  def f(x):
    # Bools never carry a sign bit; everything else: negative means set.
    if x.dtype == dtypes.bool:
      return array_ops.fill(array_ops.shape(x), False)
    return x < 0

  return _scalar(f, x)


@tf_export.tf_export('experimental.numpy.sin', v1=[])
@np_utils.np_doc('sin')
def sin(x):
  return _scalar(math_ops.sin, x, True)


@tf_export.tf_export('experimental.numpy.cos', v1=[])
@np_utils.np_doc('cos')
def cos(x):
  return _scalar(math_ops.cos, x, True)


@tf_export.tf_export('experimental.numpy.tan', v1=[])
@np_utils.np_doc('tan')
def tan(x):
  return _scalar(math_ops.tan, x, True)


@tf_export.tf_export('experimental.numpy.sinh', v1=[])
@np_utils.np_doc('sinh')
def sinh(x):
  return _scalar(math_ops.sinh, x, True)


@tf_export.tf_export('experimental.numpy.cosh', v1=[])
@np_utils.np_doc('cosh')
def cosh(x):
  return _scalar(math_ops.cosh, x, True)


@tf_export.tf_export('experimental.numpy.tanh', v1=[])
@np_utils.np_doc('tanh')
def tanh(x):
  return _scalar(math_ops.tanh, x, True)


@tf_export.tf_export('experimental.numpy.arcsin', v1=[])
@np_utils.np_doc('arcsin')
def arcsin(x):
  return _scalar(math_ops.asin, x, True)


@tf_export.tf_export('experimental.numpy.arccos', v1=[])
@np_utils.np_doc('arccos')
def arccos(x):
  return _scalar(math_ops.acos, x, True)


@tf_export.tf_export('experimental.numpy.arctan', v1=[])
@np_utils.np_doc('arctan')
def arctan(x):
  return _scalar(math_ops.atan, x, True)


@tf_export.tf_export('experimental.numpy.arcsinh', v1=[])
@np_utils.np_doc('arcsinh')
def arcsinh(x):
  return _scalar(math_ops.asinh, x, True)


@tf_export.tf_export('experimental.numpy.arccosh', v1=[])
@np_utils.np_doc('arccosh')
def arccosh(x):
  return _scalar(math_ops.acosh, x, True)


@tf_export.tf_export('experimental.numpy.arctanh', v1=[])
@np_utils.np_doc('arctanh')
def arctanh(x):
  return _scalar(math_ops.atanh, x, True)


@tf_export.tf_export('experimental.numpy.deg2rad', v1=[])
@np_utils.np_doc('deg2rad')
def deg2rad(x):
  def f(x):
    return x * (np.pi / 180.0)

  return _scalar(f, x, True)


@tf_export.tf_export('experimental.numpy.rad2deg', v1=[])
@np_utils.np_doc('rad2deg')
def rad2deg(x):
  return x * (180.0 / np.pi)
842
+
843
+
844
# Real float dtypes for which math_ops.angle is worked around below.
_tf_float_types = [
    dtypes.bfloat16,
    dtypes.float16,
    dtypes.float32,
    dtypes.float64,
]


@tf_export.tf_export('experimental.numpy.angle', v1=[])
@np_utils.np_doc('angle')
def angle(z, deg=False):  # pylint: disable=missing-function-docstring
  def f(x):
    if x.dtype in _tf_float_types:
      # Workaround for b/147515503
      # For real floats the angle is pi for negatives and 0 otherwise.
      return array_ops.where_v2(x < 0, np.pi, 0)
    else:
      return math_ops.angle(x)

  y = _scalar(f, z, True)
  if deg:
    y = rad2deg(y)
  return y


@tf_export.tf_export('experimental.numpy.cbrt', v1=[])
@np_utils.np_doc('cbrt')
def cbrt(x):
  def f(x):
    # __pow__ can't handle negative base, so we use `abs` here.
    rt = math_ops.abs(x) ** (1.0 / 3)
    return array_ops.where_v2(x < 0, -rt, rt)

  return _scalar(f, x, True)


@tf_export.tf_export('experimental.numpy.conjugate', v1=[])
@np_utils.np_doc('conjugate', link=np_utils.AliasOf('conj'))
def conjugate(x):
  return _scalar(math_ops.conj, x)


@tf_export.tf_export('experimental.numpy.exp2', v1=[])
@np_utils.np_doc('exp2')
def exp2(x):
  def f(x):
    return 2**x

  return _scalar(f, x, True)


@tf_export.tf_export('experimental.numpy.expm1', v1=[])
@np_utils.np_doc('expm1')
def expm1(x):
  return _scalar(math_ops.expm1, x, True)


@tf_export.tf_export('experimental.numpy.fix', v1=[])
@np_utils.np_doc('fix')
def fix(x):
  def f(x):
    # Round towards zero: ceil for negatives, floor for non-negatives.
    return array_ops.where_v2(x < 0, math_ops.ceil(x), math_ops.floor(x))

  return _scalar(f, x, True)
907
+
908
+
909
@tf_export.tf_export('experimental.numpy.iscomplex', v1=[])
@np_utils.np_doc('iscomplex')
def iscomplex(x):
  # Elementwise: True where the imaginary part is nonzero.
  return np_array_ops.imag(x) != 0


@tf_export.tf_export('experimental.numpy.isreal', v1=[])
@np_utils.np_doc('isreal')
def isreal(x):
  return np_array_ops.imag(x) == 0


@tf_export.tf_export('experimental.numpy.iscomplexobj', v1=[])
@np_utils.np_doc('iscomplexobj')
def iscomplexobj(x):
  # Unlike iscomplex, this checks the dtype, not the values.
  x = np_array_ops.array(x)
  return np.issubdtype(x.dtype.as_numpy_dtype, np.complexfloating)


@tf_export.tf_export('experimental.numpy.isrealobj', v1=[])
@np_utils.np_doc('isrealobj')
def isrealobj(x):
  return not iscomplexobj(x)


@tf_export.tf_export('experimental.numpy.isnan', v1=[])
@np_utils.np_doc('isnan')
def isnan(x):
  return _scalar(math_ops.is_nan, x, True)


def _make_nan_reduction(np_fun_name, reduction, init_val):
  """Helper to generate nan* functions."""

  @np_utils.np_doc(np_fun_name)
  def nan_reduction(a, axis=None, dtype=None, keepdims=False):
    # Replace NaNs with the reduction's identity element, then reduce.
    a = np_array_ops.array(a)
    v = np_array_ops.array(init_val, dtype=a.dtype)
    return reduction(
        np_array_ops.where(isnan(a), v, a),
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
    )

  return nan_reduction


nansum = tf_export.tf_export('experimental.numpy.nansum', v1=[])(
    _make_nan_reduction('nansum', np_array_ops.sum, 0)
)
nanprod = tf_export.tf_export('experimental.numpy.nanprod', v1=[])(
    _make_nan_reduction('nanprod', np_array_ops.prod, 1)
)


@tf_export.tf_export('experimental.numpy.nanmean', v1=[])
@np_utils.np_doc('nanmean')
def nanmean(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=missing-docstring
  a = np_array_ops.array(a)
  if np.issubdtype(a.dtype.as_numpy_dtype, np.bool_) or np.issubdtype(
      a.dtype.as_numpy_dtype, np.integer
  ):
    # Bool/integer inputs cannot hold NaN, so a plain mean suffices.
    return np_array_ops.mean(a, axis=axis, dtype=dtype, keepdims=keepdims)
  nan_mask = logical_not(isnan(a))
  if dtype is None:
    dtype = a.dtype.as_numpy_dtype
  # Normalize by the count of non-NaN entries rather than the full size.
  normalizer = np_array_ops.sum(
      nan_mask, axis=axis, dtype=dtype, keepdims=keepdims
  )
  return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer
980
+
981
+
982
@tf_export.tf_export('experimental.numpy.isfinite', v1=[])
@np_utils.np_doc('isfinite')
def isfinite(x):
  return _scalar(math_ops.is_finite, x, True)


@tf_export.tf_export('experimental.numpy.isinf', v1=[])
@np_utils.np_doc('isinf')
def isinf(x):
  # Non-floating dtypes cannot represent inf.
  if x.dtype.is_floating:
    return _scalar(math_ops.is_inf, x, True)
  return False


@tf_export.tf_export('experimental.numpy.isneginf', v1=[])
@np_utils.np_doc('isneginf')
def isneginf(x):
  if x.dtype.is_floating:
    return x == np_array_ops.full_like(x, -np.inf)
  return False


@tf_export.tf_export('experimental.numpy.isposinf', v1=[])
@np_utils.np_doc('isposinf')
def isposinf(x):
  if x.dtype.is_floating:
    return x == np_array_ops.full_like(x, np.inf)
  return False


@tf_export.tf_export('experimental.numpy.log2', v1=[])
@np_utils.np_doc('log2')
def log2(x):
  # Change of base from the natural log.
  return log(x) / np.log(2)


@tf_export.tf_export('experimental.numpy.log10', v1=[])
@np_utils.np_doc('log10')
def log10(x):
  return log(x) / np.log(10)


@tf_export.tf_export('experimental.numpy.log1p', v1=[])
@np_utils.np_doc('log1p')
def log1p(x):
  return _scalar(math_ops.log1p, x, True)


@tf_export.tf_export('experimental.numpy.positive', v1=[])
@np_utils.np_doc('positive')
def positive(x):
  # Identity on values; still converts the input like other unary ops.
  return _scalar(lambda x: x, x)


@tf_export.tf_export('experimental.numpy.sinc', v1=[])
@np_utils.np_doc('sinc')
def sinc(x):
  def f(x):
    # Normalized sinc: sin(pi*x)/(pi*x), with the removable singularity at 0
    # patched to 1.
    pi_x = x * np.pi
    return array_ops.where_v2(
        x == 0, array_ops.ones_like(x), math_ops.sin(pi_x) / pi_x
    )

  return _scalar(f, x, True)


@tf_export.tf_export('experimental.numpy.square', v1=[])
@np_utils.np_doc('square')
def square(x):
  return _scalar(math_ops.square, x)
1052
+
1053
+
1054
@tf_export.tf_export('experimental.numpy.diff', v1=[])
@np_utils.np_doc('diff')
def diff(a, n=1, axis=-1):  # pylint: disable=missing-function-docstring
  # n-th discrete difference along `axis`, matching np.diff. For bool inputs
  # the difference operator is `!=` (as in numpy); otherwise subtraction.
  def f(a):
    # TODO(agarwal): transpose and reshape to N, H, 1 and do a 1D convolution
    # TODO(agarwal): avoid depending on static rank.
    nd = a.shape.rank
    if nd is None:
      raise ValueError(
          'Function `diff` currently requires a known rank for input `a`. '
          f'Received: a={a} (unknown rank)'
      )
    if (axis + nd if axis < 0 else axis) >= nd:
      raise ValueError(
          f'Argument `axis` (received axis={axis}) is out of bounds '
          f'for input {a} of rank {nd}.'
      )
    if n < 0:
      # Fixed: the message previously reported the offending value as
      # `axis={n}` although the invalid argument is the order `n`.
      raise ValueError(
          f'Argument `order` must be a non-negative integer. Received: n={n}'
      )
    # Slices selecting a[1:] and a[:-1] along `axis`.
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    op = math_ops.not_equal if a.dtype == dtypes.bool else math_ops.subtract
    for _ in range(n):
      a = op(a[slice1], a[slice2])
    return a

  return _scalar(f, a)


def _wrap(f, reverse=False):
  """Wraps binary ops so they can be added as operator overloads on ndarray."""

  def _f(a, b):
    if reverse:
      a, b = b, a

    # Defer to the other operand's implementation (NotImplemented) when it
    # declares a higher __array_priority__ than ndarray.
    if (
        getattr(b, '__array_priority__', 0)
        > np_arrays.ndarray.__array_priority__
    ):
      return NotImplemented

    return f(a, b)

  return _f


def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):
  """Helper for comparison ops: promotes both operands to a common dtype.

  Args:
    tf_fun: binary TF comparison function to apply.
    x1: left operand (array_like).
    x2: right operand (array_like).
    cast_bool_to_int: whether to cast bool operands to int32 first (needed
      for ordering comparisons, which TF does not define on bool).

  Returns:
    The result of `tf_fun` applied to the promoted operands.
  """
  dtype = np_utils.result_type(x1, x2)
  # Cast x1 and x2 to the result_type if needed.
  x1 = np_array_ops.array(x1, dtype=dtype)
  x2 = np_array_ops.array(x2, dtype=dtype)
  if cast_bool_to_int and x1.dtype == dtypes.bool:
    x1 = math_ops.cast(x1, dtypes.int32)
    x2 = math_ops.cast(x2, dtypes.int32)
  return tf_fun(x1, x2)
1117
+
1118
+
1119
@tf_export.tf_export('experimental.numpy.equal', v1=[])
@np_utils.np_doc('equal')
def equal(x1, x2):
  return _comparison(math_ops.equal, x1, x2)


@tf_export.tf_export('experimental.numpy.not_equal', v1=[])
@np_utils.np_doc('not_equal')
def not_equal(x1, x2):
  return _comparison(math_ops.not_equal, x1, x2)


@tf_export.tf_export('experimental.numpy.greater', v1=[])
@np_utils.np_doc('greater')
def greater(x1, x2):
  # Ordering comparisons cast bools to int32 (third arg True).
  return _comparison(math_ops.greater, x1, x2, True)


@tf_export.tf_export('experimental.numpy.greater_equal', v1=[])
@np_utils.np_doc('greater_equal')
def greater_equal(x1, x2):
  return _comparison(math_ops.greater_equal, x1, x2, True)


@tf_export.tf_export('experimental.numpy.less', v1=[])
@np_utils.np_doc('less')
def less(x1, x2):
  return _comparison(math_ops.less, x1, x2, True)


@tf_export.tf_export('experimental.numpy.less_equal', v1=[])
@np_utils.np_doc('less_equal')
def less_equal(x1, x2):
  return _comparison(math_ops.less_equal, x1, x2, True)


@tf_export.tf_export('experimental.numpy.array_equal', v1=[])
@np_utils.np_doc('array_equal')
def array_equal(a1, a2):  # pylint: disable=missing-function-docstring
  def f(x1, x2):
    # Equal only when ranks match, then shapes match, then all values match.
    return np_utils.cond(
        math_ops.equal(array_ops.rank(x1), array_ops.rank(x2)),
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            np_utils.reduce_all(
                math_ops.equal(array_ops.shape(x1), array_ops.shape(x2))
            ),
            lambda: math_ops.reduce_all(math_ops.equal(x1, x2)),
            lambda: constant_op.constant(False),
        ),
        lambda: constant_op.constant(False),
    )

  return _comparison(f, a1, a2)


def _logical_binary_op(tf_fun, x1, x2):
  # numpy logical ops coerce both operands to bool first.
  x1 = np_array_ops.array(x1, dtype=np.bool_)
  x2 = np_array_ops.array(x2, dtype=np.bool_)
  return tf_fun(x1, x2)


@tf_export.tf_export('experimental.numpy.logical_and', v1=[])
@np_utils.np_doc('logical_and')
def logical_and(x1, x2):
  return _logical_binary_op(math_ops.logical_and, x1, x2)


@tf_export.tf_export('experimental.numpy.logical_or', v1=[])
@np_utils.np_doc('logical_or')
def logical_or(x1, x2):
  return _logical_binary_op(math_ops.logical_or, x1, x2)


@tf_export.tf_export('experimental.numpy.logical_xor', v1=[])
@np_utils.np_doc('logical_xor')
def logical_xor(x1, x2):
  return _logical_binary_op(math_ops.logical_xor, x1, x2)


@tf_export.tf_export('experimental.numpy.logical_not', v1=[])
@np_utils.np_doc('logical_not')
def logical_not(x):
  x = np_array_ops.array(x, dtype=np.bool_)
  return math_ops.logical_not(x)
1203
+
1204
+
1205
@tf_export.tf_export('experimental.numpy.linspace', v1=[])
@np_utils.np_doc('linspace')
def linspace(  # pylint: disable=missing-docstring
    start, stop, num=50, endpoint=True, retstep=False, dtype=float, axis=0
):
  if dtype:
    dtype = np_utils.result_type(dtype)
  start = np_array_ops.array(start, dtype=dtype)
  stop = np_array_ops.array(stop, dtype=dtype)
  if num < 0:
    raise ValueError(
        'Argument `num` (number of samples) must be a non-negative integer. '
        f'Received: num={num}'
    )
  # `step` is NaN whenever it is undefined (num <= 1 with endpoint=True,
  # or num == 0 without).
  step = ops.convert_to_tensor(np.nan)
  if endpoint:
    result = math_ops.linspace(start, stop, num, axis=axis)
    if num > 1:
      step = (stop - start) / (num - 1)
  else:
    # math_ops.linspace does not support endpoint=False so we manually handle it
    # here.
    if num > 0:
      step = (stop - start) / num
    if num > 1:
      # Shrink the interval by one step and sample `num` points inclusively.
      new_stop = math_ops.cast(stop, step.dtype) - step
      start = math_ops.cast(start, new_stop.dtype)
      result = math_ops.linspace(start, new_stop, num, axis=axis)
    else:
      result = math_ops.linspace(start, stop, num, axis=axis)
  if dtype:
    if dtype.is_integer:
      # Since numpy 1.20, linspace's rounding is towards -inf instead of 0
      result = math_ops.floor(result)
    result = math_ops.cast(result, dtype)
  return (result, step) if retstep else result


@tf_export.tf_export('experimental.numpy.logspace', v1=[])
@np_utils.np_doc('logspace')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
  # Linearly space the exponents, then exponentiate with `base`.
  dtype = np_utils.result_type(start, stop, dtype)
  result = linspace(
      start, stop, num=num, endpoint=endpoint, dtype=dtype, axis=axis
  )
  result = math_ops.pow(math_ops.cast(base, result.dtype), result)
  if dtype:
    result = math_ops.cast(result, dtype)
  return result


@tf_export.tf_export('experimental.numpy.geomspace', v1=[])
@np_utils.np_doc('geomspace')
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):  # pylint: disable=missing-docstring
  dtype = (
      dtypes.as_dtype(dtype)  # pylint: disable=g-long-ternary
      if dtype
      else np_utils.result_type(
          start, stop, float(num), np_array_ops.zeros((), dtype)
      )
  )
  # Do the math in at least float32 precision regardless of output dtype.
  computation_dtype = np.promote_types(dtype.as_numpy_dtype, np.float32)
  start = np_array_ops.asarray(start, dtype=computation_dtype)
  stop = np_array_ops.asarray(stop, dtype=computation_dtype)
  # follow the numpy geomspace convention for negative and complex endpoints
  start_sign = 1 - np_array_ops.sign(np_array_ops.real(start))
  stop_sign = 1 - np_array_ops.sign(np_array_ops.real(stop))
  signflip = 1 - start_sign * stop_sign // 2
  res = signflip * logspace(
      log10(signflip * start),
      log10(signflip * stop),
      num,
      endpoint=endpoint,
      base=10.0,
      dtype=computation_dtype,
      axis=0,
  )
  if axis != 0:
    res = np_array_ops.moveaxis(res, 0, axis)
  return math_ops.cast(res, dtype)
1288
+
1289
+
1290
@tf_export.tf_export('experimental.numpy.ptp', v1=[])
@np_utils.np_doc('ptp')
def ptp(a, axis=None, keepdims=None):
  # Peak-to-peak: max minus min along the axis.
  return np_array_ops.amax(a, axis=axis, keepdims=keepdims) - np_array_ops.amin(
      a, axis=axis, keepdims=keepdims
  )


@tf_export.tf_export('experimental.numpy.concatenate', v1=[])
@np_utils.np_doc_only('concatenate')
def concatenate(arys, axis=0):  # pylint: disable=missing-function-docstring
  if not isinstance(arys, (list, tuple)):
    arys = [arys]
  if not arys:
    raise ValueError(
        'Need at least one array to concatenate. Received empty '
        f'input: arys={arys}'
    )
  # Promote all inputs to a common dtype before concatenating.
  dtype = np_utils.result_type(*arys)
  arys = [np_array_ops.array(array, dtype=dtype) for array in arys]
  return array_ops.concat(arys, axis)


@tf_export.tf_export('experimental.numpy.tile', v1=[])
@np_utils.np_doc_only('tile')
def tile(a, reps):  # pylint: disable=missing-function-docstring
  a = np_array_ops.array(a)
  reps = array_ops.reshape(np_array_ops.array(reps, dtype=dtypes.int32), [-1])

  # numpy semantics: left-pad the shorter of (rank(a), len(reps)) with 1s so
  # both describe the same number of dimensions.
  a_rank = array_ops.rank(a)
  reps_size = array_ops.size(reps)
  reps = array_ops.pad(
      reps, [[math_ops.maximum(a_rank - reps_size, 0), 0]], constant_values=1
  )
  a_shape = array_ops.pad(
      array_ops.shape(a),
      [[math_ops.maximum(reps_size - a_rank, 0), 0]],
      constant_values=1,
  )
  a = array_ops.reshape(a, a_shape)

  return array_ops.tile(a, reps)


@tf_export.tf_export('experimental.numpy.count_nonzero', v1=[])
@np_utils.np_doc('count_nonzero')
def count_nonzero(a, axis=None):
  return math_ops.count_nonzero(np_array_ops.array(a), axis)


@tf_export.tf_export('experimental.numpy.argsort', v1=[])
@np_utils.np_doc('argsort')
def argsort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  # TODO(nareshmodi): make string tensors also work.
  if kind not in ('quicksort', 'stable'):
    raise ValueError(
        'Invalid value for argument `kind`. '
        'Only kind="quicksort" and kind="stable" are supported. '
        f'Received: kind={kind}'
    )
  if order is not None:
    raise ValueError('The `order` argument is not supported. Pass order=None')
  stable = kind == 'stable'

  a = np_array_ops.array(a)

  def _argsort(a, axis, stable):
    # axis=None means argsort over the flattened array.
    if axis is None:
      a = array_ops.reshape(a, [-1])
      axis = 0

    return sort_ops.argsort(a, axis, stable=stable)

  # Rank-0 inputs sort trivially to [0].
  tf_ans = np_utils.cond(
      math_ops.equal(array_ops.rank(a), 0),
      lambda: constant_op.constant([0]),
      lambda: _argsort(a, axis, stable),
  )

  if ops.is_auto_dtype_conversion_enabled():
    return np_array_ops.array(tf_ans, dtype=int)
  else:
    return np_array_ops.array(tf_ans, dtype=np.intp)


@tf_export.tf_export('experimental.numpy.sort', v1=[])
@np_utils.np_doc('sort')
def sort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  if kind != 'quicksort':
    raise ValueError(
        'Invalid value for argument `kind`. '
        'Only kind="quicksort" is supported. '
        f'Received: kind={kind}'
    )
  if order is not None:
    raise ValueError('The `order` argument is not supported. Pass order=None')

  a = np_array_ops.array(a)

  # axis=None sorts the flattened array, per numpy.
  if axis is None:
    return sort_ops.sort(array_ops.reshape(a, [-1]), 0)
  return sort_ops.sort(a, axis)
1393
+
1394
+
1395
def _argminmax(fn, a, axis=None):
  # Shared driver for argmax/argmin; flattens the input when axis is None.
  a = np_array_ops.array(a)
  if axis is None:
    # When axis is None numpy flattens the array.
    a_t = array_ops.reshape(a, [-1])
  else:
    a_t = np_array_ops.atleast_1d(a)
  return fn(input=a_t, axis=axis)


@tf_export.tf_export('experimental.numpy.argmax', v1=[])
@np_utils.np_doc('argmax')
def argmax(a, axis=None):
  return _argminmax(math_ops.argmax, a, axis)


@tf_export.tf_export('experimental.numpy.argmin', v1=[])
@np_utils.np_doc('argmin')
def argmin(a, axis=None):
  return _argminmax(math_ops.argmin, a, axis)


@tf_export.tf_export('experimental.numpy.append', v1=[])
@np_utils.np_doc('append')
def append(arr, values, axis=None):
  # axis=None flattens both inputs before concatenating, per numpy.
  if axis is None:
    return concatenate([np_array_ops.ravel(arr), np_array_ops.ravel(values)], 0)
  return concatenate([arr, values], axis=axis)


@tf_export.tf_export('experimental.numpy.average', v1=[])
@np_utils.np_doc('average')
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
  if axis is not None and not isinstance(axis, int):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError(
        'Argument `axis` must be an integer. '
        f'Received axis={axis} (of type {type(axis)})'
    )
  a = np_array_ops.array(a)
  default_float_type = np_utils.result_type(float)
  if weights is None:  # Treat all weights as 1
    if not np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
      a = a.astype(np_utils.result_type(a.dtype, default_float_type))
    avg = math_ops.reduce_mean(a, axis=axis)
    if returned:
      # Unweighted: the "sum of weights" is just the element count.
      if axis is None:
        weights_sum = array_ops.size(a)
      else:
        weights_sum = array_ops.shape(a)[axis]
      weights_sum = math_ops.cast(weights_sum, a.dtype)
  else:
    if np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
      out_dtype = np_utils.result_type(a.dtype, weights)
    else:
      out_dtype = np_utils.result_type(a.dtype, weights, default_float_type)
    a = np_array_ops.array(a, out_dtype)
    weights = np_array_ops.array(weights, out_dtype)

    def rank_equal_case():
      # Weights broadcast elementwise; shapes must match exactly.
      control_flow_assert.Assert(
          math_ops.reduce_all(array_ops.shape(a) == array_ops.shape(weights)),
          [array_ops.shape(a), array_ops.shape(weights)],
      )
      weights_sum = math_ops.reduce_sum(weights, axis=axis)
      avg = math_ops.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum

    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:

      def rank_not_equal_case():
        # 1-D weights applied along `axis` via tensordot.
        control_flow_assert.Assert(
            array_ops.rank(weights) == 1, [array_ops.rank(weights)]
        )
        weights_sum = math_ops.reduce_sum(weights)
        axes = ops.convert_to_tensor([[axis], [0]])
        avg = math_ops.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum

      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, np_utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = np_utils.cond(
          math_ops.equal(array_ops.rank(a), array_ops.rank(weights)),
          rank_equal_case,
          rank_not_equal_case,
      )

  avg = np_array_ops.array(avg)
  if returned:
    weights_sum = np_array_ops.broadcast_to(weights_sum, array_ops.shape(avg))
    return avg, weights_sum
  return avg
1492
+
1493
+
1494
@tf_export.tf_export('experimental.numpy.trace', v1=[])
@np_utils.np_doc('trace')
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):  # pylint: disable=missing-docstring
  """Sums the diagonal of `a` given by (`offset`, `axis1`, `axis2`)."""
  if dtype:
    dtype = np_utils.result_type(dtype)
  a = np_array_ops.asarray(a, dtype)

  if offset == 0:
    a_shape = a.shape
    if a_shape.rank is not None:
      rank = len(a_shape)
      # Fast path: main diagonal over the last two axes maps directly to
      # tf.linalg.trace.
      if (axis1 == -2 or axis1 == rank - 2) and (
          axis2 == -1 or axis2 == rank - 1
      ):
        return math_ops.trace(a)

  # General path: extract the requested diagonal, then sum its last axis.
  a = np_array_ops.diagonal(a, offset, axis1, axis2)
  return np_array_ops.sum(a, -1, dtype)
1512
+
1513
+
1514
@tf_export.tf_export('experimental.numpy.meshgrid', v1=[])
@np_utils.np_doc('meshgrid')
def meshgrid(*xi, **kwargs):
  """This currently requires copy=True and sparse=False."""
  sparse = kwargs.get('sparse', False)
  copy = kwargs.get('copy', True)
  indexing = kwargs.get('indexing', 'xy')

  # Only the dense, copying variant is implemented; reject anything else.
  if sparse:
    raise ValueError(
        'Function `meshgrid` does not support returning sparse arrays yet. '
        f'Received: sparse={sparse}'
    )
  if not copy:
    raise ValueError(
        f'Function `meshgrid` only supports copy=True. Received: copy={copy}'
    )

  coords = [np_array_ops.asarray(arg) for arg in xi]
  return array_ops.meshgrid(*coords, indexing=indexing)
1539
+
1540
+
1541
# Uses np_doc_only here because np.einsum (in 1.16) doesn't have argument
# `subscripts`, even though the doc says it has.
@tf_export.tf_export('experimental.numpy.einsum', v1=[])
@np_utils.np_doc_only('einsum')
def einsum(subscripts, *operands, **kwargs):  # pylint: disable=missing-docstring
  """Einstein summation; supports casting in {'safe', 'no'} only."""
  casting = kwargs.get('casting', 'safe')
  optimize = kwargs.get('optimize', False)
  if casting == 'safe':
    # 'safe' promotes all operands to a common dtype first.
    operands = np_array_ops._promote_dtype(*operands)  # pylint: disable=protected-access
  elif casting == 'no':
    operands = [np_array_ops.asarray(x) for x in operands]
  else:
    raise ValueError(
        'Invalid value for argument `casting`. '
        f'Expected casting="safe" or casting="no". Received: casting={casting}'
    )
  # Note: the first branch catches every falsy `optimize` (False, 0, '').
  if not optimize:
    # TF doesn't have a "no optimization" option.
    # TODO(wangpeng): Print a warning that np and tf use different
    # optimizations.
    tf_optimize = 'greedy'
  elif optimize == True:  # pylint: disable=singleton-comparison,g-explicit-bool-comparison
    tf_optimize = 'greedy'
  elif optimize == 'greedy':
    tf_optimize = 'greedy'
  elif optimize == 'optimal':
    tf_optimize = 'optimal'
  else:
    raise ValueError(
        'Invalid value for argument `optimize`. '
        'Expected one of {True, "greedy", "optimal"}. '
        f'Received: optimize={optimize}'
    )

  res = special_math_ops.einsum(subscripts, *operands, optimize=tf_optimize)
  return res
1577
+
1578
+
1579
+ def _tensor_t(self):
1580
+ """Returns a Tensor which is the transpose of this Tensor."""
1581
+ return self.transpose()
1582
+
1583
+
1584
+ def _tensor_ndim(self):
1585
+ """Returns the rank of the Tensor."""
1586
+ return self.shape.ndims
1587
+
1588
+
1589
+ def _tensor_pos(self):
1590
+ """Returns self, for unary operator `+`."""
1591
+ return self
1592
+
1593
+
1594
+ def _tensor_size(self):
1595
+ """Returns the number of elements in this Tensor, if fully known."""
1596
+ if not self.shape.is_fully_defined():
1597
+ return None
1598
+ return np.prod(self.shape.as_list())
1599
+
1600
+
1601
def _tensor_tolist(self):
  """NumPy-style `tolist`: nested Python lists of the tensor's values.

  Only works eagerly — symbolic tensors have no concrete value to convert.
  """
  if ops.is_symbolic_tensor(self):
    raise ValueError('Symbolic Tensors do not support the tolist API.')

  return self._numpy().tolist()  # pylint: disable=protected-access
1606
+
1607
+
1608
def _enable_numpy_methods(tensor_class):
  """A helper method for adding additional NumPy methods."""
  # NumPy-style read-only properties.
  t = property(_tensor_t)
  setattr(tensor_class, 'T', t)

  ndim = property(_tensor_ndim)
  setattr(tensor_class, 'ndim', ndim)

  size = property(_tensor_size)
  setattr(tensor_class, 'size', size)

  setattr(tensor_class, '__pos__', _tensor_pos)
  setattr(tensor_class, 'tolist', _tensor_tolist)

  # NumPy-style methods forwarded to the tf-numpy free functions.
  # TODO(b/178540516): Make a custom `setattr` that changes the method's
  # docstring to the TF one.
  setattr(tensor_class, 'transpose', np_array_ops.transpose)
  setattr(tensor_class, 'flatten', np_array_ops.flatten)
  setattr(tensor_class, 'reshape', np_array_ops._reshape_method_wrapper)  # pylint: disable=protected-access
  setattr(tensor_class, 'ravel', np_array_ops.ravel)
  setattr(tensor_class, 'clip', clip)
  setattr(tensor_class, 'astype', math_ops.cast)
  setattr(tensor_class, '__round__', np_array_ops.around)
  setattr(tensor_class, 'max', np_array_ops.amax)
  setattr(tensor_class, 'mean', np_array_ops.mean)
  setattr(tensor_class, 'min', np_array_ops.amin)

  # `data` is an identity property kept for legacy ndarray compatibility.
  # TODO(wangpeng): Remove `data` when all uses of it are removed
  data = property(lambda self: self)
  setattr(tensor_class, 'data', data)
1638
+
1639
+
1640
def enable_numpy_methods_on_tensor():
  """Adds additional NumPy methods on tf.Tensor class."""
  # Mutates the global tf.Tensor class; intended to be called once at setup.
  _enable_numpy_methods(tensor.Tensor)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_random.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Random functions."""
16
+
17
+ # pylint: disable=g-direct-tensorflow-import
18
+
19
+ import numpy as onp
20
+
21
+ from tensorflow.python.framework import random_seed
22
+ from tensorflow.python.ops import array_ops
23
+ from tensorflow.python.ops import random_ops
24
+ from tensorflow.python.ops.numpy_ops import np_array_ops
25
+ from tensorflow.python.ops.numpy_ops import np_dtypes
26
+ from tensorflow.python.ops.numpy_ops import np_utils
27
+ from tensorflow.python.util import tf_export
28
+
29
+ # TODO(agarwal): deprecate this.
30
+ DEFAULT_RANDN_DTYPE = onp.float32
31
+
32
+
33
@tf_export.tf_export('experimental.numpy.random.seed', v1=[])
@np_utils.np_doc('random.seed')
def seed(s):
  """Sets the seed for the random number generator.

  Uses `tf.set_random_seed`.

  Args:
    s: an integer.

  Raises:
    ValueError: if `s` cannot be converted to an integer.
  """
  try:
    s = int(s)
  except (TypeError, ValueError) as e:
    # `int()` raises TypeError for unconvertible objects but ValueError for
    # malformed strings (e.g. 'abc'); both mean the input is not an integer,
    # so report them uniformly. Chain the cause for debuggability.
    # TODO(wangpeng): support this?
    raise ValueError(
        f'Argument `s` got an invalid value {s}. Only integers are supported.'
    ) from e
  random_seed.set_seed(s)
51
+
52
+
53
@tf_export.tf_export('experimental.numpy.random.randn', v1=[])
@np_utils.np_doc('random.randn')
def randn(*args):
  """Returns samples from a normal distribution.

  Uses `tf.random_normal`.

  Args:
    *args: The shape of the output array.

  Returns:
    An ndarray with shape `args` and dtype `float64`.
  """
  # Thin wrapper: `randn(2, 3)` is `standard_normal(size=(2, 3))`.
  return standard_normal(size=args)
67
+
68
+
69
@tf_export.tf_export('experimental.numpy.random.standard_normal', v1=[])
@np_utils.np_doc('random.standard_normal')
def standard_normal(size=None):
  """Samples a standard normal distribution over the requested shape."""
  # TODO(wangpeng): Use new stateful RNG
  if size is None:
    shape = ()
  elif np_utils.isscalar(size):
    shape = (size,)
  else:
    shape = size
  return random_ops.random_normal(shape, dtype=np_utils.result_type(float))
79
+
80
+
81
@tf_export.tf_export('experimental.numpy.random.uniform', v1=[])
@np_utils.np_doc('random.uniform')
def uniform(low=0.0, high=1.0, size=None):
  """Draws uniform samples from [low, high) in the NumPy default float type."""
  dtype = np_utils.result_type(float)
  low = np_array_ops.asarray(low, dtype=dtype)
  high = np_array_ops.asarray(high, dtype=dtype)
  shape = size
  if shape is None:
    # Match NumPy: without an explicit size, broadcast `low` against `high`.
    shape = array_ops.broadcast_dynamic_shape(low.shape, high.shape)
  return random_ops.random_uniform(
      shape=shape, minval=low, maxval=high, dtype=dtype
  )
92
+
93
+
94
@tf_export.tf_export('experimental.numpy.random.poisson', v1=[])
@np_utils.np_doc('random.poisson')
def poisson(lam=1.0, size=None):
  """Draws integer samples from a Poisson distribution with rate `lam`."""
  if size is None:
    shape = ()
  else:
    shape = (size,) if np_utils.isscalar(size) else size
  return random_ops.random_poisson(shape=shape, lam=lam, dtype=np_dtypes.int_)
102
+
103
+
104
@tf_export.tf_export('experimental.numpy.random.random', v1=[])
@np_utils.np_doc('random.random')
def random(size=None):
  # Alias for uniform sampling over the half-open interval [0, 1).
  return uniform(0.0, 1.0, size)
108
+
109
+
110
@tf_export.tf_export('experimental.numpy.random.rand', v1=[])
@np_utils.np_doc('random.rand')
def rand(*size):
  # Like `random`, but the shape is given as positional arguments.
  return uniform(0.0, 1.0, size)
114
+
115
+
116
@tf_export.tf_export('experimental.numpy.random.randint', v1=[])
@np_utils.np_doc('random.randint')
def randint(low, high=None, size=None, dtype=onp.int64):  # pylint: disable=missing-function-docstring
  """Random integers in [low, high), or [0, low) when `high` is None."""
  low = int(low)
  if high is None:
    # Single-argument form: sample from [0, low).
    low, high = 0, low
  shape = () if size is None else ((size,) if isinstance(size, int) else size)
  requested_dtype = dtype
  dtype = np_utils.result_type(dtype)
  accepted_dtypes = (onp.int32, onp.int64)
  if dtype not in accepted_dtypes:
    raise ValueError(
        f'Argument `dtype` got an invalid value {requested_dtype}. Only those '
        f'convertible to {accepted_dtypes} are supported.'
    )
  return random_ops.random_uniform(
      shape=shape, minval=low, maxval=high, dtype=dtype
  )
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_utils.py ADDED
@@ -0,0 +1,715 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Utility functions for internal use."""
16
+ # pylint: disable=g-direct-tensorflow-import
17
+
18
+ import inspect
19
+ import numbers
20
+ import os
21
+ import re
22
+
23
+ import numpy as np
24
+
25
+ from tensorflow.python.framework import dtypes
26
+ from tensorflow.python.framework import flexible_dtypes
27
+ from tensorflow.python.framework import indexed_slices
28
+ from tensorflow.python.framework import ops
29
+ from tensorflow.python.framework import tensor_util
30
+ from tensorflow.python.ops import array_ops
31
+ from tensorflow.python.ops import cond as tf_cond
32
+ from tensorflow.python.ops import math_ops
33
+ from tensorflow.python.ops.numpy_ops import np_arrays
34
+ from tensorflow.python.ops.numpy_ops import np_dtypes
35
+ from tensorflow.python.types import core
36
+ from tensorflow.python.util import nest
37
+ from tensorflow.python.util import tf_export
38
+
39
+
40
def _canonicalize_axis(axis, rank):
  """Canonicalizes a single possibly-negative axis; see `_canonicalize_axes`."""
  return _canonicalize_axes([axis], rank)[0]
42
+
43
+
44
def _canonicalize_axes(axes, rank):
  """Maps possibly-negative `axes` to non-negative indices given `rank`."""
  rank = _maybe_static(rank)

  if isinstance(rank, core.Tensor):
    # Rank only known at graph run time: resolve negative axes via tf.cond so
    # the result is a Tensor.
    canonicalizer = lambda axis: cond(  # pylint: disable=g-long-lambda
        axis < 0, lambda: axis + rank, lambda: axis
    )
  else:
    # Static rank: plain Python arithmetic.
    canonicalizer = lambda axis: axis + rank if axis < 0 else axis

  return [canonicalizer(axis) for axis in axes]
55
+
56
+
57
+ def _supports_signature():
58
+ return hasattr(inspect, 'signature')
59
+
60
+
61
def _to_tf_type(dtype):
  """Converts a native python or numpy type to TF DType.

  Args:
    dtype: Could be a python type, a numpy type or a TF DType.

  Returns:
    A tensorflow `DType`.
  """
  # `as_dtype` already accepts all three input kinds.
  return dtypes.as_dtype(dtype)
71
+
72
+
73
def _to_numpy_type(dtype):
  """Converts a native python or TF DType to numpy type.

  Args:
    dtype: Could be a python type, a numpy type or a TF DType.

  Returns:
    A NumPy `dtype`.
  """
  if not isinstance(dtype, dtypes.DType):
    return np.dtype(dtype)
  # TF DType: use its NumPy equivalent directly.
  return dtype.as_numpy_dtype
85
+
86
+
87
def isscalar(val):
  """Returns whether `val` is a scalar value or scalar Tensor."""
  # Unwrap the legacy tf-numpy ndarray wrapper first.
  if isinstance(val, np_arrays.ndarray):
    val = val.data
  if isinstance(val, core.Tensor):
    ndims = val.shape.ndims
    if ndims is not None:
      return ndims == 0
    else:
      # Rank unknown statically: return a boolean Tensor computed at run time.
      return math_ops.equal(array_ops.rank(val), 0)
  else:
    return np.isscalar(val)
99
+
100
+
101
+ def _has_docstring(f):
102
+ return (
103
+ f and hasattr(f, '__doc__') and isinstance(f.__doc__, str) and f.__doc__
104
+ )
105
+
106
+
107
+ def _add_blank_line(s):
108
+ if s.endswith('\n'):
109
+ return s + '\n'
110
+ else:
111
+ return s + '\n\n'
112
+
113
+
114
def _np_signature(f):
  """An enhanced inspect.signature that can handle numpy.ufunc."""
  # TODO(wangpeng): consider migrating away from inspect.signature.
  # inspect.signature is supported in Python 3.3.
  if not hasattr(inspect, 'signature'):
    return None
  if f is None:
    return None
  if not isinstance(f, np.ufunc):
    try:
      return inspect.signature(f)
    except ValueError:
      return None

  # ufuncs have no introspectable signature; synthesize one from nin/nout
  # following NumPy's documented ufunc calling convention.
  def names_from_num(prefix, n):
    if n <= 0:
      return []
    elif n == 1:
      return [prefix]
    else:
      return [prefix + str(i + 1) for i in range(n)]

  input_names = names_from_num('x', f.nin)
  output_names = names_from_num('out', f.nout)
  keyword_only_params = [
      ('where', True),
      ('casting', 'same_kind'),
      ('order', 'K'),
      ('dtype', None),
      ('subok', True),
      ('signature', None),
      ('extobj', None),
  ]
  params = []
  params += [
      inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY)
      for name in input_names
  ]
  if f.nout > 1:
    # Multi-output ufuncs also accept individual positional out1..outN.
    params += [
        inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY, default=None)
        for name in output_names
    ]
  params += [
      inspect.Parameter(
          'out',
          inspect.Parameter.POSITIONAL_OR_KEYWORD,
          default=None if f.nout == 1 else (None,) * f.nout,
      )
  ]
  params += [
      inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=default)
      for name, default in keyword_only_params
  ]
  return inspect.Signature(params)
169
+
170
+
171
+ # Python 2 doesn't allow keyword-only argument. Python prior to 3.8 doesn't
172
+ # allow positional-only argument. So we conflate positional-only, keyword-only
173
+ # and positional-or-keyword arguments here.
174
+ def _is_compatible_param_kind(a, b):
175
+ def relax(k):
176
+ if k in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.KEYWORD_ONLY):
177
+ return inspect.Parameter.POSITIONAL_OR_KEYWORD
178
+ return k
179
+
180
+ return relax(a) == relax(b)
181
+
182
+
183
+ def _prepare_np_fun_name_and_fun(np_fun_name, np_fun):
184
+ """Mutually propagates information between `np_fun_name` and `np_fun`.
185
+
186
+ If one is None and the other is not, we'll try to make the former not None in
187
+ a best effort.
188
+
189
+ Args:
190
+ np_fun_name: name for the np_fun symbol. At least one of np_fun or
191
+ np_fun_name shoud be set.
192
+ np_fun: the numpy function whose docstring will be used.
193
+
194
+ Returns:
195
+ Processed `np_fun_name` and `np_fun`.
196
+ """
197
+ if np_fun_name is not None:
198
+ assert isinstance(np_fun_name, str)
199
+ if np_fun is not None:
200
+ assert not isinstance(np_fun, str)
201
+ if np_fun is None:
202
+ assert np_fun_name is not None
203
+ try:
204
+ np_fun = getattr(np, str(np_fun_name))
205
+ except AttributeError:
206
+ np_fun = None
207
+ if np_fun_name is None:
208
+ assert np_fun is not None
209
+ np_fun_name = np_fun.__name__
210
+ return np_fun_name, np_fun
211
+
212
+
213
def _np_doc_helper(
    f, np_f, np_fun_name=None, unsupported_params=None, link=None
):
  """Helper to get docs."""
  assert np_f or np_fun_name
  if not np_fun_name:
    np_fun_name = np_f.__name__
  doc = "TensorFlow variant of NumPy's `%s`.\n\n" % np_fun_name
  if unsupported_params:
    doc += (
        'Unsupported arguments: '
        + ', '.join('`' + name + '`' for name in unsupported_params)
        + '.\n\n'
    )
  if _has_docstring(f):
    doc += f.__doc__
    doc = _add_blank_line(doc)
  # TODO(wangpeng): Re-enable the following and choose inlined vs. link to numpy
  # doc according to some global switch.
  doc = _add_np_doc(doc, np_fun_name, np_f, link=link)
  return doc
234
+
235
+
236
+ _np_doc_form = os.getenv('TF_NP_DOC_FORM', 'stable')
237
+
238
+
239
def get_np_doc_form():
  """Gets the form of the original numpy docstrings.

  Returns:
    See `set_np_doc_form` for the list of valid values.
  """
  # Reads the module-level flag (initialized from $TF_NP_DOC_FORM).
  return _np_doc_form
246
+
247
+
248
def set_np_doc_form(value):
  r"""Selects the form of the original numpy docstrings.

  This function sets a global variable that controls how a tf-numpy symbol's
  docstring should refer to the original numpy docstring. If `value` is
  `'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy
  docstring. Otherwise, a link to the original numpy docstring will be
  added. Which numpy version the link points to depends on `value`:
  * `'stable'`: the current stable version;
  * `'dev'`: the current development version;
  * pattern `\d+(\.\d+(\.\d+)?)?`: `value` will be treated as a version number,
    e.g. '1.16'.

  Args:
    value: the value to set the global variable to.
  """
  global _np_doc_form
  _np_doc_form = value
266
+
267
+
268
class Link:
  """Wraps a full URL to be used verbatim as a symbol's numpy-doc link."""

  def __init__(self, v):
    self.value = v
272
+
273
+
274
class AliasOf:
  """Wraps another numpy symbol name whose generated link should be used."""

  def __init__(self, v):
    self.value = v
278
+
279
+
280
class NoLink:
  """Sentinel: suppress the numpy documentation link entirely."""
  pass
282
+
283
+
284
def generate_link(flag, np_fun_name):
  """Generates link from numpy function name.

  Args:
    flag: the flag to control link form. See `set_np_doc_form`.
    np_fun_name: the numpy function name.

  Returns:
    A string.
  """
  # Only adds link in this case
  if flag == 'dev':
    base = 'https://numpy.org/devdocs'
  elif flag == 'stable':
    base = 'https://numpy.org/doc/stable'
  elif re.match(r'\d+(\.\d+(\.\d+)?)?$', flag):
    # `flag` is the version number
    base = f'https://numpy.org/doc/{flag}'
  else:
    return None
  return f'{base}/reference/generated/numpy.{np_fun_name}.html'
305
+
306
+
307
+ _is_check_link = os.getenv('TF_NP_CHECK_LINK', 'False') in ('True', 'true', '1')
308
+
309
+
310
def is_check_link():
  """Whether generated doc links are verified with an HTTP HEAD request."""
  return _is_check_link
312
+
313
+
314
def set_check_link(value):
  """Sets the module-level flag read by `is_check_link`."""
  global _is_check_link
  _is_check_link = value
317
+
318
+
319
def _add_np_doc(doc, np_fun_name, np_f, link):
  """Appends the numpy docstring to `doc`, according to `set_np_doc_form`.

  See `set_np_doc_form` for how it controls the form of the numpy docstring.

  Args:
    doc: the docstring to be appended to.
    np_fun_name: the name of the numpy function.
    np_f: (optional) the numpy function.
    link: (optional) which link to use. See `np_doc` for details.

  Returns:
    `doc` with numpy docstring appended.
  """
  flag = get_np_doc_form()
  if flag == 'inlined':
    if _has_docstring(np_f):
      doc += 'Documentation for `numpy.%s`:\n\n' % np_fun_name
      # TODO(wangpeng): It looks like code snippets in numpy doc don't work
      # correctly with doctest. Fix that and remove the reformatting of the np_f
      # comment.
      doc += np_f.__doc__.replace('>>>', '>')
  elif isinstance(flag, str):
    # Link mode: resolve the URL according to the `link` override, if any.
    if link is None:
      url = generate_link(flag, np_fun_name)
    elif isinstance(link, AliasOf):
      url = generate_link(flag, link.value)
    elif isinstance(link, Link):
      url = link.value
    else:
      # e.g. NoLink: emit no reference at all.
      url = None
    if url is not None:
      if is_check_link():
        # Imports locally because some builds may not have `requests`
        import requests  # pylint: disable=g-import-not-at-top

        r = requests.head(url)
        if r.status_code != 200:
          raise ValueError(
              f'Check link failed at [{url}] with status code {r.status_code}. '
              f'Argument `np_fun_name` is {np_fun_name}.'
          )
      doc += 'See the NumPy documentation for [`numpy.%s`](%s).' % (
          np_fun_name,
          url,
      )
  return doc
366
+
367
+
368
+ _is_sig_mismatch_an_error = os.getenv(
369
+ 'TF_NP_SIG_MISMATCH_IS_ERROR', 'False'
370
+ ) in ('True', 'true', '1')
371
+
372
+
373
def is_sig_mismatch_an_error():
  """Whether np_doc signature mismatches raise (set via env var or setter)."""
  return _is_sig_mismatch_an_error
375
+
376
+
377
def set_is_sig_mismatch_an_error(value):
  """Sets the module-level flag read by `is_sig_mismatch_an_error`."""
  global _is_sig_mismatch_an_error
  _is_sig_mismatch_an_error = value
380
+
381
+
382
def np_doc(np_fun_name, np_fun=None, unsupported_params=None, link=None):
  """Attachs numpy docstring to a function.

  Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name shoud be set.
    np_fun: (optional) the numpy function whose docstring will be used.
    unsupported_params: (optional) the list of parameters not supported by
      tf.numpy.
    link: (optional) which link to use. If `None`, a default link generated from
      `np_fun_name` will be used. If an instance of `AliasOf`, `link.value` will
      be used in place of `np_fun_name` for the link generation. If an instance
      of `Link`, `link.value` will be used as the whole link. If an instance of
      `NoLink`, no link will be added.

  Returns:
    A function decorator that attaches the docstring from `np_fun` to the
    decorated function.
  """
  np_fun_name_orig, np_fun_orig = np_fun_name, np_fun
  np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
  np_sig = _np_signature(np_fun)
  if unsupported_params is None:
    unsupported_params = []

  def decorator(f):
    """The decorator."""
    # Compare the decorated function's signature against numpy's; mismatches
    # either raise (when is_sig_mismatch_an_error()) or are recorded as
    # unsupported parameters in the generated docstring.
    if hasattr(inspect, 'signature') and np_sig is not None:
      try:
        sig = inspect.signature(f)
      except ValueError:
        sig = None
      if sig is not None:
        for name, param in sig.parameters.items():
          np_param = np_sig.parameters.get(name)
          if np_param is None:
            if is_sig_mismatch_an_error():
              raise TypeError(
                  f"Cannot find parameter {name} in the numpy function's "
                  'signature (which has these parameters: '
                  f'{list(np_sig.parameters.keys())}). Argument `np_fun_name` '
                  f'is {np_fun_name_orig}. Argument `np_fun` is {np_fun_orig}.'
              )
            else:
              continue
          if is_sig_mismatch_an_error() and not _is_compatible_param_kind(
              param.kind, np_param.kind
          ):
            raise TypeError(
                f'Parameter {name} is of kind {param.kind} while in numpy it '
                f'is of kind {np_param.kind}. Argument `np_fun_name` is '
                f'{np_fun_name_orig}. Argument `np_fun` is {np_fun_orig}.'
            )
          has_default = param.default != inspect.Parameter.empty
          np_has_default = np_param.default != inspect.Parameter.empty
          if is_sig_mismatch_an_error() and has_default != np_has_default:
            raise TypeError(
                'Parameter {} should{} have a default value. Argument '
                '`np_fun_name` is {}. Argument `np_fun` is {}.'.format(
                    name,
                    '' if np_has_default else ' not',
                    np_fun_name_orig,
                    np_fun_orig,
                )
            )
        for name in np_sig.parameters:
          if name not in sig.parameters:
            unsupported_params.append(name)
    f.__doc__ = _np_doc_helper(
        f,
        np_fun,
        np_fun_name=np_fun_name,
        unsupported_params=unsupported_params,
        link=link,
    )
    return f

  return decorator
460
+
461
+
462
def np_doc_only(np_fun_name, np_fun=None):
  """Attachs numpy docstring to a function.

  This differs from np_doc in that it doesn't check for a match in signature.

  Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name shoud be set.
    np_fun: (optional) the numpy function whose docstring will be used.

  Returns:
    A function decorator that attaches the docstring from `np_fun` to the
    decorated function.
  """
  np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)

  def decorator(f):
    f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)
    return f

  return decorator
483
+
484
+
485
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args
@tf_export.tf_export('experimental.numpy.finfo', v1=[])
@np_doc('finfo')
def finfo(dtype):
  """Note that currently it just forwards to the numpy namesake, while

  tensorflow and numpy dtypes may have different properties.
  """
  # Normalize TF DTypes / Python types to a numpy dtype before delegating.
  return np.finfo(_to_numpy_type(dtype))
494
+
495
+
496
+ # pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args
497
+
498
+
499
def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    # Pass Python scalars through unchanged so value-based promotion applies.
    return x
  if isinstance(x, indexed_slices.IndexedSlices) or tensor_util.is_tf_type(x):
    return _to_numpy_type(x.dtype)
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, (list, tuple)):
    raise ValueError(
        'Cannot find dtype for type inference from argument `x` of a sequence '
        f'type {type(x)}. For sequences, please call this function on each '
        'element individually.'
    )
  return x
516
+
517
+
518
@tf_export.tf_export('experimental.numpy.result_type', v1=[])
# Can't use np_doc because np.result_type is a builtin function.
@np_doc_only('result_type')
def result_type(*arrays_and_dtypes):  # pylint: disable=missing-function-docstring
  """Computes the promoted dtype of the given arrays/dtypes."""
  if ops.is_auto_dtype_conversion_enabled():
    # Use auto dtype conversion semantics for type inference.
    dtype, _ = flexible_dtypes.result_type(*arrays_and_dtypes)
    return dtype
  arrays_and_dtypes = [
      _maybe_get_dtype(x) for x in nest.flatten(arrays_and_dtypes)
  ]
  if not arrays_and_dtypes:
    # If arrays_and_dtypes is an empty list, let numpy decide what the dtype is.
    arrays_and_dtypes = [np.asarray([])]
  return np_dtypes._result_type(*arrays_and_dtypes)  # pylint: disable=protected-access
533
+
534
+
535
def result_type_unary(a, dtype):  # pylint: disable=missing-function-docstring
  """Find the result type from a single input and a dtype."""
  if dtype:
    # We need to let np_utils.result_type decide the dtype, not tf.zeros_like
    return result_type(dtype)

  # np_utils.result_type treats string inputs as dtype strings, not as strings.
  # but for unary we want to treat it as a string input.
  if isinstance(a, str):
    # `np.unicode_` (the former spelling) was removed in NumPy 2.0;
    # `np.str_` is the same type and exists on NumPy 1.x as well.
    return np.str_
  elif isinstance(a, bytes):
    return np.bytes_

  # TF and numpy has different interpretations of Python types such as
  # `float`, so we let `np_utils.result_type` decide.
  return result_type(a)
551
+
552
+
553
+ def _result_type_binary(t1, t2): # pylint: disable=missing-function-docstring
554
+ """A specialization of result_type for 2 arguments for performance reasons."""
555
+ try:
556
+ return np_dtypes._result_type( # pylint: disable=protected-access
557
+ _maybe_get_dtype(t1),
558
+ _maybe_get_dtype(t2),
559
+ )
560
+ except ValueError:
561
+ return result_type(t1, t2)
562
+
563
+
564
+ @tf_export.tf_export('experimental.numpy.promote_types', v1=[])
565
+ @np_doc('promote_types')
566
+ def promote_types(type1, type2): # pylint: disable=missing-function-docstring
567
+ type1 = _to_numpy_type(type1)
568
+ type2 = _to_numpy_type(type2)
569
+ return np_dtypes.canonicalize_dtype(np.promote_types(type1, type2))
570
+
571
+
572
+ def tf_broadcast(*args):
573
+ """Broadcast tensors.
574
+
575
+ Args:
576
+ *args: a list of tensors whose shapes are broadcastable against each other.
577
+
578
+ Returns:
579
+ Tensors broadcasted to the common shape.
580
+ """
581
+ if len(args) <= 1:
582
+ return args
583
+ sh = array_ops.shape(args[0])
584
+ for arg in args[1:]:
585
+ sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))
586
+ return [array_ops.broadcast_to(arg, sh) for arg in args]
587
+
588
+
589
+ # TODO(wangpeng): Move the following functions to a separate file and check for
590
+ # float dtypes in each of them.
591
+
592
+
593
+ def get_static_value(x):
594
+ """A version of tf.get_static_value that returns None on float dtypes.
595
+
596
+ It returns None on float dtypes in order to avoid breaking gradients.
597
+
598
+ Args:
599
+ x: a tensor.
600
+
601
+ Returns:
602
+ Same as `tf.get_static_value`, except that it returns None when `x` has a
603
+ float dtype.
604
+ """
605
+ if isinstance(x, core.Tensor) and (x.dtype.is_floating or x.dtype.is_complex):
606
+ return None
607
+ return tensor_util.constant_value(x)
608
+
609
+
610
+ def _maybe_static(x):
611
+ value = get_static_value(x)
612
+ if value is None:
613
+ return x
614
+ else:
615
+ return value
616
+
617
+
618
+ # All the following functions exist becaues get_static_value can't handle
619
+ # their TF counterparts.
620
+
621
+
622
+ def cond(pred, true_fn, false_fn):
623
+ """A version of tf.cond that tries to evaluate the condition."""
624
+ v = get_static_value(pred)
625
+ if v is None:
626
+ return tf_cond.cond(pred, true_fn, false_fn)
627
+ if v:
628
+ return true_fn()
629
+ else:
630
+ return false_fn()
631
+
632
+
633
+ def add(a, b):
634
+ """A version of tf.add that eagerly evaluates if possible."""
635
+ return _maybe_static(a) + _maybe_static(b)
636
+
637
+
638
+ def subtract(a, b):
639
+ """A version of tf.subtract that eagerly evaluates if possible."""
640
+ return _maybe_static(a) - _maybe_static(b)
641
+
642
+
643
+ def greater(a, b):
644
+ """A version of tf.greater that eagerly evaluates if possible."""
645
+ return _maybe_static(a) > _maybe_static(b)
646
+
647
+
648
+ def greater_equal(a, b):
649
+ """A version of tf.greater_equal that eagerly evaluates if possible."""
650
+ return _maybe_static(a) >= _maybe_static(b)
651
+
652
+
653
+ def less_equal(a, b):
654
+ """A version of tf.less_equal that eagerly evaluates if possible."""
655
+ return _maybe_static(a) <= _maybe_static(b)
656
+
657
+
658
+ def logical_and(a, b):
659
+ """A version of tf.logical_and that eagerly evaluates if possible."""
660
+ a_value = get_static_value(a)
661
+ if a_value is not None:
662
+ if np.isscalar(a_value):
663
+ if a_value:
664
+ return _maybe_static(b)
665
+ else:
666
+ return a_value
667
+ else:
668
+ return a_value & _maybe_static(b)
669
+ else:
670
+ return a & _maybe_static(b)
671
+
672
+
673
+ def logical_or(a, b):
674
+ """A version of tf.logical_or that eagerly evaluates if possible."""
675
+ a_value = get_static_value(a)
676
+ if a_value is not None:
677
+ if np.isscalar(a_value):
678
+ if a_value:
679
+ return a_value
680
+ else:
681
+ return _maybe_static(b)
682
+ else:
683
+ return a_value | _maybe_static(b)
684
+ else:
685
+ return a | _maybe_static(b)
686
+
687
+
688
+ def getitem(a, slice_spec):
689
+ """A version of __getitem__ that eagerly evaluates if possible."""
690
+ return _maybe_static(a)[slice_spec]
691
+
692
+
693
+ def reduce_all(input_tensor, axis=None, keepdims=False):
694
+ """A version of tf.reduce_all that eagerly evaluates if possible."""
695
+ v = get_static_value(input_tensor)
696
+ if v is None:
697
+ return math_ops.reduce_all(input_tensor, axis=axis, keepdims=keepdims)
698
+ else:
699
+ return v.all(axis=axis, keepdims=keepdims)
700
+
701
+
702
+ def reduce_any(input_tensor, axis=None, keepdims=False):
703
+ """A version of tf.reduce_any that eagerly evaluates if possible."""
704
+ v = get_static_value(input_tensor)
705
+ if v is None:
706
+ return math_ops.reduce_any(input_tensor, axis=axis, keepdims=keepdims)
707
+ else:
708
+ return v.any(axis=axis, keepdims=keepdims)
709
+
710
+
711
+ def tf_rank(t):
712
+ r = t.shape.rank
713
+ if r is not None:
714
+ return r
715
+ return array_ops.rank(t)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Ops for pfor, for_loop, jacobian."""
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (235 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/control_flow_ops.cpython-310.pyc ADDED
Binary file (21.1 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/gradients.cpython-310.pyc ADDED
Binary file (4.62 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/control_flow_ops.py ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """for_loop and pfor ops."""
16
+ # pylint: disable=g-direct-tensorflow-import
17
+
18
+ import functools
19
+
20
+ from tensorflow.python.eager import context
21
+ from tensorflow.python.eager import def_function
22
+ from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
23
+ from tensorflow.python.autograph.impl import api as autograph
24
+ from tensorflow.python.framework import composite_tensor
25
+ from tensorflow.python.framework import indexed_slices
26
+ from tensorflow.python.framework import ops
27
+ from tensorflow.python.framework import sparse_tensor
28
+ from tensorflow.python.framework import tensor
29
+ from tensorflow.python.framework import tensor_shape
30
+ from tensorflow.python.framework import tensor_util
31
+ from tensorflow.python.framework import type_spec
32
+ from tensorflow.python.ops import array_ops
33
+ from tensorflow.python.ops import cond
34
+ from tensorflow.python.ops import math_ops
35
+ from tensorflow.python.ops import tensor_array_ops
36
+ from tensorflow.python.ops import while_loop
37
+ from tensorflow.python.ops.parallel_for.pfor import PFor
38
+ from tensorflow.python.ops.parallel_for.pfor import PForConfig
39
+ from tensorflow.python.platform import tf_logging as logging
40
+ from tensorflow.python.util import nest
41
+ from tensorflow.python.util import tf_decorator
42
+ from tensorflow.python.util import tf_inspect
43
+ from tensorflow.python.util import variable_utils
44
+ from tensorflow.python.util.tf_export import tf_export
45
+
46
+
47
+ def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
48
+ """Runs `loop_fn` `iters` times and stacks the outputs.
49
+
50
+
51
+ Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
52
+ stacks corresponding outputs of the different runs.
53
+
54
+ Args:
55
+ loop_fn: A function that takes an int32 scalar tf.Tensor object representing
56
+ the iteration number, and returns a possibly nested structure of tensor
57
+ objects. The shape of these outputs should not depend on the input.
58
+ loop_fn_dtypes: dtypes for the outputs of `loop_fn`.
59
+ iters: Number of iterations for which to run `loop_fn`.
60
+ parallel_iterations: The number of iterations that can be dispatched in
61
+ parallel. This knob can be used to control the total memory usage.
62
+
63
+ Returns:
64
+ Returns a nested structure of stacked output tensor objects with the same
65
+ nested structure as the output of `loop_fn`.
66
+ """
67
+
68
+ flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
69
+ is_none_list = []
70
+
71
+ def while_body(i, *ta_list):
72
+ """Body of while loop."""
73
+ fn_conv = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())
74
+ fn_output = nest.flatten(fn_conv(i))
75
+ if len(fn_output) != len(flat_loop_fn_dtypes):
76
+ raise ValueError(
77
+ f"Number of expected outputs {len(flat_loop_fn_dtypes)}, does not "
78
+ f"match the number of actual outputs {len(fn_output)} from loop_fn: "
79
+ f"{loop_fn} with output {fn_output}.")
80
+ outputs = []
81
+ del is_none_list[:]
82
+ is_none_list.extend(x is None for x in fn_output)
83
+ for out, ta in zip(fn_output, ta_list):
84
+ # TODO(agarwal): support returning Operation objects from loop_fn.
85
+ if out is not None:
86
+ # out may be a ref tensor, wrap it in identity to get a non-ref tensor.
87
+ ta = ta.write(i, out)
88
+ outputs.append(ta)
89
+ return tuple([i + 1] + outputs)
90
+
91
+ if parallel_iterations is not None:
92
+ extra_args = {"parallel_iterations": parallel_iterations}
93
+ else:
94
+ extra_args = {}
95
+ ta_list = while_loop.while_loop(lambda i, *ta: i < iters, while_body, [0] + [
96
+ tensor_array_ops.TensorArray(dtype.base_dtype, iters)
97
+ for dtype in flat_loop_fn_dtypes
98
+ ], **extra_args)[1:]
99
+
100
+ # TODO(rachelim): enable this for sparse tensors
101
+
102
+ output = [
103
+ None if is_none else ta.stack()
104
+ for ta, is_none in zip(ta_list, is_none_list)
105
+ ]
106
+ assert len(output) in (0, len(flat_loop_fn_dtypes))
107
+ if not output:
108
+ # This may happen for the case where iters == 0.
109
+ # Pack a list of empty tensors with the proper ranks to match pfor output on 0 iters
110
+ loop_var = array_ops.placeholder_with_default(0, shape=[])
111
+ try:
112
+ loop_fn_out = loop_fn(loop_var)
113
+ out_shapes = [
114
+ [0] + ops.convert_to_tensor(x).shape
115
+ for x in nest.flatten(loop_fn_out)
116
+ ]
117
+ output = [
118
+ array_ops.zeros(out_shapes[i], dt)
119
+ for i, dt in enumerate(flat_loop_fn_dtypes)
120
+ ]
121
+ except Exception:
122
+ output = [array_ops.zeros([0])]
123
+ return nest.pack_sequence_as(loop_fn_dtypes, output)
124
+
125
+
126
+ def _flatten_first_two_dims(x):
127
+ """Flattens the first two dimensions of x into a single dimension."""
128
+ old_shape = array_ops.shape(x)
129
+ new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
130
+ axis=0)
131
+ return array_ops.reshape(x, new_shape)
132
+
133
+
134
+ PFOR_CONFIG_ARG = "pfor_config"
135
+
136
+
137
+ def _is_under_xla_context():
138
+ """Check if we are currently inside an XLA compile context."""
139
+ g = ops.get_default_graph()
140
+ while g is not None:
141
+ control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
142
+ while control_flow_context is not None:
143
+ if control_flow_context.IsXLAContext():
144
+ return True
145
+ else:
146
+ control_flow_context = control_flow_context.outer_context
147
+ # If g is a FuncGraph, get its outer_graph.
148
+ g = getattr(g, "outer_graph", None)
149
+ return False
150
+
151
+
152
+ def pfor(loop_fn,
153
+ iters,
154
+ fallback_to_while_loop=True,
155
+ parallel_iterations=None,
156
+ warn=False):
157
+ """Equivalent to running `loop_fn` `iters` times and stacking the outputs.
158
+
159
+ `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
160
+ times, with input from 0 to `iters - 1`, and stacking corresponding output of
161
+ each iteration. However the implementation does not use a `tf.while_loop`.
162
+ Instead it adds new operations to the graph that collectively compute the same
163
+ value as what running `loop_fn` in a loop would compute.
164
+
165
+
166
+ This is an experimental feature and currently has a lot of limitations:
167
+ - There should be no data dependency between the different iterations. For
168
+ example, a future iteration should not depend on a value or side-effect of
169
+ a previous iteration.
170
+ - Stateful kernels may mostly not be supported since these often imply a
171
+ data dependency or ordering of the iterations. We do support a limited set
172
+ of such stateful kernels though (like RandomFoo, Variable operations like
173
+ reads, etc).
174
+ - Conversion works only on a limited set of kernels for which a converter
175
+ has been registered.
176
+ - `loop_fn` has limited support for control flow operations. `tf.cond` in
177
+ particular is not supported.
178
+ - `loop_fn` should return nested structure of Tensors or Operations. However
179
+ if an Operation is returned, it should have zero outputs.
180
+ - The shape and dtype of `loop_fn` outputs should not depend on the input
181
+ to loop_fn.
182
+
183
+ Args:
184
+ loop_fn: A function that takes an int32 scalar tf.Tensor object representing
185
+ the iteration number, and optionally a keyword argument `pfor_config` set
186
+ to a PForConfig object. It returns a possibly nested structure of Tensor
187
+ or Operation objects. Note that if setting `parallel_iterations` argument
188
+ to something other than None, `loop_fn` may be called more than once
189
+ during graph construction. So it may need to avoid mutating global state.
190
+ iters: Number of iterations for which to run `loop_fn`.
191
+ fallback_to_while_loop: If true, on failing to vectorize an operation, pfor
192
+ fallbacks to using a `tf.while_loop` to dispatch the iterations.
193
+ parallel_iterations: A knob to control how many iterations are vectorized
194
+ and dispatched in parallel. The default value of None corresponds to
195
+ vectorizing all the iterations. If `parallel_iterations` is smaller than
196
+ `iters`, then chunks of at most that many iterations are dispatched in
197
+ sequence. This knob can be used to control the total memory usage.
198
+ warn: Whether or not to warn when falling back to while loops.
199
+
200
+ Returns:
201
+ Returns a nested structure of stacked tensor objects with the same nested
202
+ structure as the output of `loop_fn`.
203
+ Raises:
204
+ ValueError: If parallel_iterations is not None and not an integer > 1.
205
+ """
206
+ def f():
207
+ return _pfor_impl(
208
+ loop_fn,
209
+ iters,
210
+ fallback_to_while_loop=fallback_to_while_loop,
211
+ parallel_iterations=parallel_iterations,
212
+ warn=warn)
213
+ # Note that we wrap into a tf.function if in eager execution mode or under
214
+ # XLA compilation. The latter is so that we don't compile operations like
215
+ # tf.placeholder that are created by the loop body.
216
+ functions_run_eagerly = None
217
+ if context.executing_eagerly() or _is_under_xla_context():
218
+ functions_run_eagerly = def_function.functions_run_eagerly()
219
+ if functions_run_eagerly:
220
+ logging.warning(
221
+ "It looks like tf.function behavior was disabled, perhaps using "
222
+ "tf.config.run_functions_eagerly. Vectorization "
223
+ "primitives (e.g. tf.vectorized_map) require tf.function to work. "
224
+ "These primitives will override the disable.")
225
+ def_function.run_functions_eagerly(False)
226
+ f = def_function.function(f)
227
+
228
+ outputs = f()
229
+ if functions_run_eagerly is not None:
230
+ def_function.run_functions_eagerly(functions_run_eagerly)
231
+ return outputs
232
+
233
+
234
+ def _should_expand_composite(value):
235
+ return (isinstance(value, composite_tensor.CompositeTensor)
236
+ # Leave sparse tensors to be converted by `PFor._convert_sparse`.
237
+ and not isinstance(value, sparse_tensor.SparseTensor)
238
+ and not isinstance(value, indexed_slices.IndexedSlices))
239
+
240
+
241
+ # pylint: disable=protected-access
242
+ def _composite_to_tensors(value, is_batched=False):
243
+ """Converts a CompositeTensor into a list of stackable tensors."""
244
+ if _should_expand_composite(value):
245
+ spec = value._type_spec
246
+ if not isinstance(spec, type_spec.BatchableTypeSpec):
247
+ raise ValueError(f"CompositeTensor instance {value} returned from "
248
+ "parallel_for or vectorized_map loop body must provide "
249
+ f"a `BatchableTypeSpec` (saw: {spec}).")
250
+ if is_batched:
251
+ return spec._to_batched_tensor_list(value)
252
+ return spec._to_tensor_list(value)
253
+ return value
254
+ # pylint: enable=protected-access
255
+
256
+
257
+ # pylint: disable=protected-access
258
+ def _composite_from_tensors(stacked_tensors,
259
+ preconverted_value,
260
+ batch_size):
261
+ """Converts a list of stacked tensors to a batch CompositeTensor."""
262
+ if _should_expand_composite(preconverted_value):
263
+ batch_type_spec = preconverted_value._type_spec._batch(batch_size)
264
+ return batch_type_spec._from_compatible_tensor_list(stacked_tensors)
265
+ return stacked_tensors
266
+ # pylint: enable=protected-access
267
+
268
+
269
+ def _loop_fn_has_config(loop_fn):
270
+ """Test if `loop_fn` has a `pfor_config` argument."""
271
+ if tf_inspect.isfunction(loop_fn):
272
+ argspec = tf_inspect.getargspec(loop_fn)
273
+ return PFOR_CONFIG_ARG in argspec.args
274
+ elif isinstance(loop_fn, functools.partial):
275
+ fn = loop_fn.func
276
+ argspec = tf_inspect.getargspec(fn)
277
+ return (PFOR_CONFIG_ARG in argspec.args and
278
+ PFOR_CONFIG_ARG not in loop_fn.keywords)
279
+ else:
280
+ loop_class = tf_decorator.unwrap(loop_fn)[1]
281
+ if not hasattr(loop_class, "__call__"):
282
+ raise ValueError("`loop_fn` object did not have a __call__ method")
283
+ argspec = tf_inspect.getargspec(loop_class.__call__)
284
+ return PFOR_CONFIG_ARG in argspec.args
285
+
286
+
287
+ def _pfor_impl(loop_fn,
288
+ iters,
289
+ fallback_to_while_loop,
290
+ parallel_iterations=None,
291
+ pfor_config=None,
292
+ warn=False):
293
+ """Implementation of pfor."""
294
+ assert not context.executing_eagerly()
295
+ loop_fn_has_config = _loop_fn_has_config(loop_fn)
296
+ existing_ops = set(ops.get_default_graph().get_operations())
297
+ iters_value = tensor_util.constant_value(iters)
298
+ # Run the loop body
299
+ with ops.name_scope("loop_body"):
300
+ loop_var = array_ops.placeholder_with_default(0, shape=[])
301
+ if loop_fn_has_config:
302
+ if pfor_config is None:
303
+ pfor_config = PForConfig()
304
+ pfor_config._set_iters(iters) # pylint: disable=protected-access
305
+ loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
306
+ else:
307
+ assert pfor_config is None
308
+ f = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())
309
+ loop_fn_outputs = f(loop_var)
310
+ loop_fn_output_tensors = nest.map_structure(_composite_to_tensors,
311
+ loop_fn_outputs)
312
+
313
+ # Convert outputs to Tensor if needed.
314
+ tmp_loop_fn_outputs = []
315
+ for loop_fn_output in nest.flatten(loop_fn_output_tensors):
316
+ if (loop_fn_output is not None and not isinstance(
317
+ loop_fn_output,
318
+ (ops.Operation, tensor.Tensor, sparse_tensor.SparseTensor))):
319
+ if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
320
+ logging.warn("Converting %s to a dense representation may make it slow."
321
+ " Alternatively, output the indices and values of the"
322
+ " IndexedSlices separately, and handle the vectorized"
323
+ " outputs directly." % loop_fn_output)
324
+ loop_fn_output = ops.convert_to_tensor(loop_fn_output)
325
+ else:
326
+ loop_fn_output = ops.convert_to_tensor(loop_fn_output)
327
+ tmp_loop_fn_outputs.append(loop_fn_output)
328
+ loop_fn_output_tensors = nest.pack_sequence_as(loop_fn_output_tensors,
329
+ tmp_loop_fn_outputs)
330
+
331
+ new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
332
+ iters = ops.convert_to_tensor(iters)
333
+ if parallel_iterations is not None:
334
+ if parallel_iterations < 1:
335
+ raise ValueError(
336
+ "Argument `parallel_iterations` must be None or a positive integer. "
337
+ f"Received: {parallel_iterations}.")
338
+ if parallel_iterations == 1:
339
+ raise ValueError(
340
+ "Found `parallel_iterations == 1`. Use `for_loop` instead.")
341
+ if iters_value is not None and iters_value < parallel_iterations:
342
+ parallel_iterations = None
343
+ if parallel_iterations is None:
344
+ with ops.name_scope("pfor"):
345
+ converter = PFor(
346
+ loop_var,
347
+ iters,
348
+ new_ops,
349
+ fallback_to_while_loop=fallback_to_while_loop,
350
+ pfor_config=pfor_config,
351
+ warn=warn)
352
+ flattened_output_tensors = []
353
+ for loop_fn_output in nest.flatten(loop_fn_output_tensors):
354
+ output = converter.convert(loop_fn_output)
355
+ flattened_output_tensors.append(output)
356
+ else:
357
+ if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
358
+ raise ValueError("Setting `parallel_iterations` currently unsupported if "
359
+ "reductions across iterations are performed.")
360
+ num_tiled_iterations = iters // parallel_iterations
361
+ num_remaining_iterations = iters % parallel_iterations
362
+ # TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
363
+ # a tf.function and extract the graph from there to vectorize it.
364
+ with ops.name_scope("pfor_untiled"):
365
+ converter = PFor(loop_var, num_remaining_iterations, new_ops,
366
+ fallback_to_while_loop=fallback_to_while_loop,
367
+ pfor_config=pfor_config)
368
+ remaining_output_tensors = []
369
+ flattened_output_tensors = nest.flatten(loop_fn_output_tensors)
370
+ for loop_fn_output in flattened_output_tensors:
371
+ output = converter.convert(loop_fn_output)
372
+ remaining_output_tensors.append(output)
373
+
374
+ with ops.name_scope("pfor_tiled"):
375
+ loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
376
+ for x in flattened_output_tensors]
377
+
378
+ def tiled_loop_body(j):
379
+ offset = j * parallel_iterations + num_remaining_iterations
380
+
381
+ def tiled_loop_fn(i, pfor_config=None):
382
+ if loop_fn_has_config:
383
+ loop_fn_outputs = loop_fn(i + offset, pfor_config=pfor_config)
384
+ else:
385
+ loop_fn_outputs = loop_fn(i + offset)
386
+ return nest.flatten(
387
+ # Stacking across iterations requires explicit Tensors.
388
+ nest.map_structure(_composite_to_tensors, loop_fn_outputs))
389
+
390
+ return _pfor_impl(
391
+ tiled_loop_fn,
392
+ parallel_iterations,
393
+ fallback_to_while_loop=fallback_to_while_loop,
394
+ pfor_config=pfor_config)
395
+
396
+ tiled_output_tensors = for_loop(
397
+ tiled_loop_body, loop_fn_dtypes,
398
+ num_tiled_iterations, parallel_iterations=1)
399
+ tiled_output_tensors = [
400
+ _flatten_first_two_dims(y) for y in tiled_output_tensors]
401
+
402
+ with ops.name_scope("pfor"):
403
+ if iters_value is None or iters_value % parallel_iterations:
404
+ output_tensors = cond.cond(
405
+ math_ops.equal(num_remaining_iterations, 0),
406
+ lambda: tiled_output_tensors,
407
+ lambda: [array_ops.concat([x, y], axis=0) # pylint: disable=g-long-lambda
408
+ for x, y in zip(remaining_output_tensors,
409
+ tiled_output_tensors)])
410
+ else:
411
+ output_tensors = tiled_output_tensors
412
+ flattened_output_tensors = nest.flatten(output_tensors)
413
+
414
+ for output, original_output in zip(flattened_output_tensors,
415
+ nest.flatten(loop_fn_output_tensors)):
416
+ # Restore any shape information lost from tiling.
417
+ # TODO(b/174254748): this may not be correct for stacked `variant`s.
418
+ output.set_shape(
419
+ tensor_shape.TensorShape([iters_value]).concatenate(
420
+ original_output.shape))
421
+ return nest.map_structure_up_to(
422
+ loop_fn_outputs,
423
+ functools.partial(_composite_from_tensors, batch_size=iters_value),
424
+ nest.pack_sequence_as(loop_fn_output_tensors,
425
+ flattened_output_tensors),
426
+ loop_fn_outputs)
427
+
428
+
429
+ def _broadcasting_gather(x, i):
430
+ """Wrapper for gather that implicitly broadcasts unit dimensions."""
431
+ static_first_dim = tensor_shape.dimension_value(x.shape[0])
432
+ if static_first_dim == 1:
433
+ i = 0
434
+ elif static_first_dim is None:
435
+ i = array_ops.where_v2(array_ops.shape(x)[0] > 1, i, 0)
436
+ result = array_ops.gather(x, i)
437
+ return result
438
+
439
+
440
+ # pylint: disable=protected-access
441
+ def _gather_from_tensor_or_composite(x, i):
442
+ """Wrapper for gather that handles CompositeTensors."""
443
+ if _should_expand_composite(x):
444
+ spec = x._type_spec
445
+ gathered_tensors = [_broadcasting_gather(t, i)
446
+ for t in spec._to_batched_tensor_list(x)]
447
+ return spec._unbatch()._from_compatible_tensor_list(gathered_tensors)
448
+ return _broadcasting_gather(x, i)
449
+ # pylint: enable=protected-access
450
+
451
+
452
+ @tf_export("vectorized_map")
453
+ def vectorized_map(fn, elems, fallback_to_while_loop=True, warn=True):
454
+ """Parallel map on the list of tensors unpacked from `elems` on dimension 0.
455
+
456
+ This method works similar to `tf.map_fn` but is optimized to run much faster,
457
+ possibly with a much larger memory footprint. The speedups are obtained by
458
+ vectorization (see [Auto-Vectorizing TensorFlow Graphs: Jacobians,
459
+ Auto-Batching and Beyond](https://arxiv.org/pdf/1903.04243.pdf)). The idea
460
+ behind vectorization is to semantically launch all the invocations of `fn` in
461
+ parallel and fuse corresponding operations across all these invocations. This
462
+ fusion is done statically at graph generation time and the generated code is
463
+ often similar in performance to a manually fused version.
464
+
465
+ Because `tf.vectorized_map` fully parallelizes the batch, this method will
466
+ generally be significantly faster than using `tf.map_fn`, especially in eager
467
+ mode. However this is an experimental feature and currently has a lot of
468
+ limitations:
469
+ - There should be no data dependency between the different semantic
470
+ invocations of `fn`, i.e. it should be safe to map the elements of the
471
+ inputs in any order.
472
+ - Stateful kernels may mostly not be supported since these often imply a
473
+ data dependency. We do support a limited set of such stateful kernels
474
+ though (like RandomFoo, Variable operations like reads, etc).
475
+ - `fn` has limited support for control flow operations.
476
+ - `fn` should return nested structure of Tensors or Operations. However
477
+ if an Operation is returned, it should have zero outputs.
478
+ - The shape and dtype of any intermediate or output tensors in the
479
+ computation of `fn` should not depend on the input to `fn`.
480
+
481
+ Examples:
482
+ ```python
483
+ def outer_product(a):
484
+ return tf.tensordot(a, a, 0)
485
+
486
+ batch_size = 100
487
+ a = tf.ones((batch_size, 32, 32))
488
+ c = tf.vectorized_map(outer_product, a)
489
+ assert c.shape == (batch_size, 32, 32, 32, 32)
490
+ ```
491
+
492
+ ```python
493
+ # Computing per-example gradients
494
+
495
+ batch_size = 10
496
+ num_features = 32
497
+ layer = tf.keras.layers.Dense(1)
498
+
499
+ def model_fn(arg):
500
+ with tf.GradientTape() as g:
501
+ inp, label = arg
502
+ inp = tf.expand_dims(inp, 0)
503
+ label = tf.expand_dims(label, 0)
504
+ prediction = layer(inp)
505
+ loss = tf.nn.l2_loss(label - prediction)
506
+ return g.gradient(loss, (layer.kernel, layer.bias))
507
+
508
+ inputs = tf.random.uniform([batch_size, num_features])
509
+ labels = tf.random.uniform([batch_size, 1])
510
+ per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
511
+ assert per_example_gradients[0].shape == (batch_size, num_features, 1)
512
+ assert per_example_gradients[1].shape == (batch_size, 1)
513
+ ```
514
+
515
+ Args:
516
+ fn: The callable to be performed. It accepts one argument, which will have
517
+ the same (possibly nested) structure as `elems`, and returns a possibly
518
+ nested structure of Tensors and Operations, which may be different than
519
+ the structure of `elems`.
520
+ elems: A tensor or (possibly nested) sequence of tensors, each of which will
521
+ be unpacked along their first dimension. The nested sequence of the
522
+ resulting slices will be mapped over by `fn`. The first dimensions of all
523
+ elements must broadcast to a consistent value; equivalently, each
524
+ element tensor must have first dimension of either `B` or `1`, for some
525
+ common batch size `B >= 1`.
526
+ fallback_to_while_loop: If true, on failing to vectorize an operation,
527
+ the unsupported op is wrapped in a tf.while_loop to execute the map
528
+ iterations. Note that this fallback only happens for unsupported ops and
529
+ other parts of `fn` are still vectorized. If false, on encountering an
530
+ unsupported op, a ValueError is thrown. Note that the fallbacks can result
531
+ in slowdowns since vectorization often yields speedup of one to two orders
532
+ of magnitude.
533
+ warn: If set to `false`, this will supress any warnings due to operation
534
+ conversions in the provided `fn` falling back to while loops.
535
+
536
+ Returns:
537
+ A tensor or (possibly nested) sequence of tensors. Each tensor packs the
538
+ results of applying fn to tensors unpacked from elems along the first
539
+ dimension, from first to last.
540
+
541
+ Although they are less common as user-visible inputs and outputs, note that
542
+ tensors of type `tf.variant` which represent tensor lists (for example from
543
+ `tf.raw_ops.TensorListFromTensor`) are vectorized by stacking the list
544
+ contents rather than the variant itself, and so the container tensor will
545
+ have a scalar shape when returned rather than the usual stacked shape. This
546
+ improves the performance of control flow gradient vectorization.
547
+
548
+ Raises:
549
+ ValueError: If vectorization fails and fallback_to_while_loop is False.
550
+ """
551
+ elems = variable_utils.convert_variables_to_tensors(elems)
552
+ elems = nest.map_structure(ops.convert_to_tensor,
553
+ elems,
554
+ expand_composites=True)
555
+
556
+ def loop_fn(i):
557
+ gathered_elems = nest.map_structure(
558
+ lambda x: _gather_from_tensor_or_composite(x, i), elems)
559
+ return fn(gathered_elems)
560
+
561
+ # Extract batch size from the maximum first dimension of any element.
562
+ flat_elems = nest.flatten(
563
+ nest.map_structure(
564
+ functools.partial(_composite_to_tensors,
565
+ is_batched=True),
566
+ elems))
567
+ def _get_shape(x):
568
+ if x.shape.rank is None:
569
+ return None
570
+ return x.shape.as_list()[0]
571
+ static_first_dims = [_get_shape(elem) for elem in flat_elems]
572
+ if any(s is None for s in static_first_dims):
573
+ batch_size = math_ops.reduce_max(
574
+ [array_ops.shape(elem)[0] for elem in flat_elems])
575
+ else:
576
+ batch_size = max(static_first_dims)
577
+
578
+ return pfor(
579
+ loop_fn,
580
+ batch_size,
581
+ fallback_to_while_loop=fallback_to_while_loop,
582
+ warn=warn)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/gradients.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Jacobian ops."""
16
+ from tensorflow.python.framework import ops
17
+ from tensorflow.python.framework import tensor
18
+ from tensorflow.python.ops import array_ops
19
+ from tensorflow.python.ops import check_ops
20
+ from tensorflow.python.ops import gradients_impl as gradient_ops
21
+ from tensorflow.python.ops.parallel_for import control_flow_ops
22
+ from tensorflow.python.util import nest
23
+
24
+
25
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
  """Computes jacobian of `output` w.r.t. `inputs`.

  Args:
    output: A tensor.
    inputs: A tensor or a nested structure of tensor objects.
    use_pfor: If true, uses pfor for computing the jacobian. Else uses
      tf.while_loop.
    parallel_iterations: A knob to control how many iterations are dispatched
      in parallel. This knob can be used to control the total memory usage.

  Returns:
    A tensor or a nested structure of tensors with the same structure as
    `inputs`. Each entry is the jacobian of `output` w.r.t. to the
    corresponding value in `inputs`. If output has shape [y_1, ..., y_n] and
    inputs_i has shape [x_1, ..., x_m], the corresponding jacobian has shape
    [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
    sparse (IndexedSlices), jacobian function currently makes it dense and
    returns a Tensor instead. This may change in the future.
  """
  inputs_flat = nest.flatten(inputs)
  # Remember both the static and dynamic output shapes before flattening so
  # the per-element gradients can be reshaped back at the end.
  static_out_shape = output.shape
  dynamic_out_shape = array_ops.shape(output)
  output = array_ops.reshape(output, [-1])

  def loop_fn(i):
    # Differentiate a single scalar element of the flattened output.
    return gradient_ops.gradients(array_ops.gather(output, i), inputs_flat)

  try:
    num_outputs = int(output.shape[0])
  except TypeError:
    # Static size unknown; fall back to the dynamic size.
    num_outputs = array_ops.shape(output)[0]

  if use_pfor:
    per_output_grads = control_flow_ops.pfor(
        loop_fn, num_outputs, parallel_iterations=parallel_iterations)
  else:
    per_output_grads = control_flow_ops.for_loop(
        loop_fn,
        [output.dtype] * len(inputs_flat),
        num_outputs,
        parallel_iterations=parallel_iterations)

  # Restore the output dimensions and attach the best-known static shape.
  for idx, grad in enumerate(per_output_grads):
    if not isinstance(grad, tensor.Tensor):
      continue
    reshaped = array_ops.reshape(
        grad,
        array_ops.concat([dynamic_out_shape, array_ops.shape(grad)[1:]],
                         axis=0))
    reshaped.set_shape(static_out_shape.concatenate(inputs_flat[idx].shape))
    per_output_grads[idx] = reshaped

  return nest.pack_sequence_as(inputs, per_output_grads)
78
+
79
+
80
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
  """Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.

  e.g.
  x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
  y = x * x
  jacobian = batch_jacobian(y, x)
  # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]

  Args:
    output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
      only depend on `inp[i,...]`.
    inp: A tensor with shape [b, x1, ..., x_m]
    use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
      tf.while_loop.
    parallel_iterations: A knob to control how many iterations are vectorized
      and dispatched in parallel. The default value of None, when use_pfor is
      true, corresponds to vectorizing all the iterations. When use_pfor is
      false, the default value of None corresponds to parallel_iterations=10.
      This knob can be used to control the total memory usage.

  Returns:
    A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
    is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
    per-example jacobians.

  Raises:
    ValueError: if first dimension of `output` and `inp` do not match.
  """
  out_shape = output.shape
  if not out_shape[0].is_compatible_with(inp.shape[0]):
    raise ValueError(f"Need first dimension of `output` shape ({output.shape}) "
                     f"and `inp` shape ({inp.shape}) to match.")
  # Prefer static batch/row sizes when fully known; otherwise fall back to
  # dynamic shape ops (and rebind `out_shape` to the dynamic shape tensor so
  # the final reshape below works in both cases).
  if out_shape.is_fully_defined():
    batch_size = int(out_shape[0])
    output_row_size = out_shape.num_elements() // batch_size
  else:
    out_shape = array_ops.shape(output)
    batch_size = out_shape[0]
    output_row_size = array_ops.size(output) // batch_size
  inp_shape = array_ops.shape(inp)
  # Flatten output to 2-D, asserting the batch dimensions agree at runtime.
  with ops.control_dependencies(
      [check_ops.assert_equal(batch_size, inp_shape[0])]):
    output = array_ops.reshape(output, [batch_size, output_row_size])

  def loop_fn(col):
    # Gradient of one flattened output column w.r.t. the whole input batch;
    # cross-example entries are zero by the per-example dependence assumption.
    return gradient_ops.gradients(
        array_ops.gather(output, col, axis=1), inp)[0]

  if use_pfor:
    stacked = control_flow_ops.pfor(
        loop_fn, output_row_size, parallel_iterations=parallel_iterations)
  else:
    stacked = control_flow_ops.for_loop(
        loop_fn, output.dtype, output_row_size,
        parallel_iterations=parallel_iterations)
  if stacked is None:
    return None
  # [output_row_size, b, x] -> [b, output_row_size, x] -> full jacobian shape.
  stacked = array_ops.reshape(stacked, [output_row_size, batch_size, -1])
  result = array_ops.transpose(stacked, [1, 0, 2])
  return array_ops.reshape(
      result, array_ops.concat([out_shape, inp_shape[1:]], axis=0))
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/pfor.py ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/test_util.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Test utility."""
16
+
17
+ import numpy as np
18
+
19
+ from tensorflow.python.ops import variables
20
+ from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
21
+ from tensorflow.python.platform import test
22
+ from tensorflow.python.util import nest
23
+
24
+
25
class PForTestCase(test.TestCase):
  """Base class for test cases comparing pfor against a reference for_loop."""

  def _run_targets(self, targets1, targets2=None, run_init=True):
    # Flatten both target structures; an absent second set becomes empty.
    flat1 = nest.flatten(targets1)
    flat2 = nest.flatten(targets2) if targets2 is not None else []
    assert not flat2 or len(flat1) == len(flat2)
    if run_init:
      self.evaluate(variables.global_variables_initializer())
    return self.evaluate(flat1 + flat2)

  # TODO(agarwal): Allow tests to pass down tolerances.
  def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
    # Evaluated results come back as [targets1..., targets2...]; compare the
    # two halves element-wise (flatten again to unpack SparseTensorValues).
    evaluated = nest.flatten(self._run_targets(targets1, targets2))
    half = len(evaluated) // 2
    for idx in range(half):
      expected = evaluated[idx + half]
      actual = evaluated[idx]
      if expected.dtype != np.object_:
        self.assertAllClose(expected, actual, rtol=rtol, atol=atol)
      else:
        # Object arrays (e.g. strings) require exact equality.
        self.assertAllEqual(expected, actual)

  def _test_loop_fn(self,
                    loop_fn,
                    iters,
                    parallel_iterations=None,
                    fallback_to_while_loop=False,
                    rtol=1e-4,
                    atol=1e-5):
    # Run the vectorized version first, then a plain for_loop with matching
    # output dtypes, and check both shapes and values agree.
    vectorized = pfor_control_flow_ops.pfor(
        loop_fn,
        iters=iters,
        fallback_to_while_loop=fallback_to_while_loop,
        parallel_iterations=parallel_iterations)
    out_dtypes = nest.map_structure(lambda t: t.dtype, vectorized)
    looped = pfor_control_flow_ops.for_loop(
        loop_fn, out_dtypes, iters=iters,
        parallel_iterations=parallel_iterations)

    def _check_shape(a, b):
      msg = (
          "Inferred static shapes are different between two loops:"
          f" {a.shape} vs {b.shape}."
      )
      # TODO(b/268146947): should assert bool(a.shape) == bool(b.shape),
      # since both should be either defined or undefined. But it does not work.
      if b.shape:
        self.assertEqual(a.shape.as_list()[0], b.shape.as_list()[0], msg)
      # TODO(b/268146947): self.assertShapeEqual(a, b, msg) does not work.

    nest.map_structure(_check_shape, vectorized, looped)
    self.run_and_assert_equal(vectorized, looped, rtol=rtol, atol=atol)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (907 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/dynamic_ragged_shape.cpython-310.pyc ADDED
Binary file (92.2 kB). View file