Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llava_next/share/terminfo/a/aaa-30-s-rv +0 -0
- llava_next/share/terminfo/a/aaa-rv-unk +0 -0
- llava_next/share/terminfo/a/abm85h-old +0 -0
- llava_next/share/terminfo/a/adds200 +0 -0
- llava_next/share/terminfo/a/addsvp60 +0 -0
- llava_next/share/terminfo/a/adm+sgr +0 -0
- llava_next/share/terminfo/a/adm36 +0 -0
- llava_next/share/terminfo/a/aj830 +0 -0
- llava_next/share/terminfo/a/ampex-219 +0 -0
- llava_next/share/terminfo/a/annarbor4080 +0 -0
- llava_next/share/terminfo/a/ansi+pp +0 -0
- llava_next/share/terminfo/a/ansi+sgr +0 -0
- llava_next/share/terminfo/a/ansi+sgrul +0 -0
- llava_next/share/terminfo/a/ansi-color-2-emx +0 -0
- llava_next/share/terminfo/a/apl +0 -0
- llava_next/share/terminfo/a/at-color +0 -0
- llava_next/share/terminfo/a/atari +0 -0
- llava_next/share/terminfo/a/atari_st +0 -0
- llava_next/share/terminfo/a/atari_st-color +0 -0
- llava_next/share/terminfo/a/att2350 +0 -0
- llava_next/share/terminfo/a/att4410v1 +0 -0
- llava_next/share/terminfo/a/att4410v1-w +0 -0
- llava_next/share/terminfo/a/att4424 +0 -0
- llava_next/share/terminfo/a/att4425 +0 -0
- llava_next/share/terminfo/a/att4426 +0 -0
- llava_next/share/terminfo/a/att505-22 +0 -0
- llava_next/share/terminfo/a/att5420-w-rv +0 -0
- llava_next/share/terminfo/a/att5425-w +0 -0
- llava_next/share/terminfo/a/att615-103k-w +0 -0
- llava_next/share/terminfo/a/att700 +0 -0
- llava_next/share/terminfo/a/att730-24 +0 -0
- llava_next/share/terminfo/a/att7300 +0 -0
- llava_next/share/terminfo/a/avt +0 -0
- llava_next/share/terminfo/a/avt-w +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/batch_ops.py +123 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/bitwise_ops.py +33 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/composite_tensor_ops.py +118 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_assert.py +130 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_ops.py +2256 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_switch_case.py +253 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_func_graphs.py +56 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_toggles.py +69 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/custom_gradient.py +823 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/default_gradient.py +80 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/filesystem_ops.py +38 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_batch_ops.py +699 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_bitwise_ops.py +765 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_collective_ops.py +1452 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_composite_tensor_ops.py +172 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py +0 -0
llava_next/share/terminfo/a/aaa-30-s-rv
ADDED
|
Binary file (1.41 kB). View file
|
|
|
llava_next/share/terminfo/a/aaa-rv-unk
ADDED
|
Binary file (484 Bytes). View file
|
|
|
llava_next/share/terminfo/a/abm85h-old
ADDED
|
Binary file (802 Bytes). View file
|
|
|
llava_next/share/terminfo/a/adds200
ADDED
|
Binary file (728 Bytes). View file
|
|
|
llava_next/share/terminfo/a/addsvp60
ADDED
|
Binary file (520 Bytes). View file
|
|
|
llava_next/share/terminfo/a/adm+sgr
ADDED
|
Binary file (172 Bytes). View file
|
|
|
llava_next/share/terminfo/a/adm36
ADDED
|
Binary file (1.41 kB). View file
|
|
|
llava_next/share/terminfo/a/aj830
ADDED
|
Binary file (357 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ampex-219
ADDED
|
Binary file (709 Bytes). View file
|
|
|
llava_next/share/terminfo/a/annarbor4080
ADDED
|
Binary file (473 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi+pp
ADDED
|
Binary file (318 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi+sgr
ADDED
|
Binary file (368 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi+sgrul
ADDED
|
Binary file (143 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi-color-2-emx
ADDED
|
Binary file (1.72 kB). View file
|
|
|
llava_next/share/terminfo/a/apl
ADDED
|
Binary file (424 Bytes). View file
|
|
|
llava_next/share/terminfo/a/at-color
ADDED
|
Binary file (1.97 kB). View file
|
|
|
llava_next/share/terminfo/a/atari
ADDED
|
Binary file (856 Bytes). View file
|
|
|
llava_next/share/terminfo/a/atari_st
ADDED
|
Binary file (856 Bytes). View file
|
|
|
llava_next/share/terminfo/a/atari_st-color
ADDED
|
Binary file (1.97 kB). View file
|
|
|
llava_next/share/terminfo/a/att2350
ADDED
|
Binary file (1.01 kB). View file
|
|
|
llava_next/share/terminfo/a/att4410v1
ADDED
|
Binary file (1.13 kB). View file
|
|
|
llava_next/share/terminfo/a/att4410v1-w
ADDED
|
Binary file (1.14 kB). View file
|
|
|
llava_next/share/terminfo/a/att4424
ADDED
|
Binary file (775 Bytes). View file
|
|
|
llava_next/share/terminfo/a/att4425
ADDED
|
Binary file (1.61 kB). View file
|
|
|
llava_next/share/terminfo/a/att4426
ADDED
|
Binary file (807 Bytes). View file
|
|
|
llava_next/share/terminfo/a/att505-22
ADDED
|
Binary file (1.18 kB). View file
|
|
|
llava_next/share/terminfo/a/att5420-w-rv
ADDED
|
Binary file (1.4 kB). View file
|
|
|
llava_next/share/terminfo/a/att5425-w
ADDED
|
Binary file (1.63 kB). View file
|
|
|
llava_next/share/terminfo/a/att615-103k-w
ADDED
|
Binary file (1.7 kB). View file
|
|
|
llava_next/share/terminfo/a/att700
ADDED
|
Binary file (1.72 kB). View file
|
|
|
llava_next/share/terminfo/a/att730-24
ADDED
|
Binary file (1.9 kB). View file
|
|
|
llava_next/share/terminfo/a/att7300
ADDED
|
Binary file (1.01 kB). View file
|
|
|
llava_next/share/terminfo/a/avt
ADDED
|
Binary file (1.23 kB). View file
|
|
|
llava_next/share/terminfo/a/avt-w
ADDED
|
Binary file (1.23 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/batch_ops.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""Operations for automatic batching and unbatching."""
|
| 17 |
+
from tensorflow.python.eager import def_function
|
| 18 |
+
from tensorflow.python.framework import ops
|
| 19 |
+
from tensorflow.python.framework import tensor
|
| 20 |
+
from tensorflow.python.ops import gen_batch_ops
|
| 21 |
+
# pylint: disable=wildcard-import
|
| 22 |
+
from tensorflow.python.ops.gen_batch_ops import *
|
| 23 |
+
# pylint: enable=wildcard-import
|
| 24 |
+
from tensorflow.python.util import nest
|
| 25 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@tf_export("nondifferentiable_batch_function")
|
| 29 |
+
def batch_function(num_batch_threads,
|
| 30 |
+
max_batch_size,
|
| 31 |
+
batch_timeout_micros,
|
| 32 |
+
allowed_batch_sizes=None,
|
| 33 |
+
max_enqueued_batches=10,
|
| 34 |
+
autograph=True,
|
| 35 |
+
enable_large_batch_splitting=True):
|
| 36 |
+
"""Batches the computation done by the decorated function.
|
| 37 |
+
|
| 38 |
+
So, for example, in the following code
|
| 39 |
+
|
| 40 |
+
```python
|
| 41 |
+
@batch_function(1, 2, 3)
|
| 42 |
+
def layer(a):
|
| 43 |
+
return tf.matmul(a, a)
|
| 44 |
+
|
| 45 |
+
b = layer(w)
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
if more than one session.run call is simultaneously trying to compute `b`
|
| 49 |
+
the values of `w` will be gathered, non-deterministically concatenated
|
| 50 |
+
along the first axis, and only one thread will run the computation. See the
|
| 51 |
+
documentation of the `Batch` op for more details.
|
| 52 |
+
|
| 53 |
+
Assumes that all arguments of the decorated function are Tensors which will
|
| 54 |
+
be batched along their first dimension.
|
| 55 |
+
|
| 56 |
+
SparseTensor is not supported. The return value of the decorated function
|
| 57 |
+
must be a Tensor or a list/tuple of Tensors.
|
| 58 |
+
|
| 59 |
+
Args:
|
| 60 |
+
num_batch_threads: Number of scheduling threads for processing batches
|
| 61 |
+
of work. Determines the number of batches processed in parallel.
|
| 62 |
+
max_batch_size: Batch sizes will never be bigger than this.
|
| 63 |
+
batch_timeout_micros: Maximum number of microseconds to wait before
|
| 64 |
+
outputting an incomplete batch.
|
| 65 |
+
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
|
| 66 |
+
does nothing. Otherwise, supplies a list of batch sizes, causing the op
|
| 67 |
+
to pad batches up to one of those sizes. The entries must increase
|
| 68 |
+
monotonically, and the final entry must equal max_batch_size.
|
| 69 |
+
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
|
| 70 |
+
autograph: Whether to use autograph to compile python and eager style code
|
| 71 |
+
for efficient graph-mode execution.
|
| 72 |
+
enable_large_batch_splitting: The value of this option doesn't affect
|
| 73 |
+
processing output given the same input; it affects implementation details
|
| 74 |
+
as stated below: 1. Improve batching efficiency by eliminating unnecessary
|
| 75 |
+
adding. 2.`max_batch_size` specifies the limit of input and
|
| 76 |
+
`allowed_batch_sizes` specifies the limit of a task to be processed. API
|
| 77 |
+
user can give an input of size 128 when 'max_execution_batch_size'
|
| 78 |
+
is 32 -> implementation can split input of 128 into 4 x 32, schedule
|
| 79 |
+
concurrent processing, and then return concatenated results corresponding
|
| 80 |
+
to 128.
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
The decorated function will return the unbatched computation output Tensors.
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
def decorator(fn): # pylint: disable=missing-docstring
|
| 87 |
+
|
| 88 |
+
def decorated(*args): # pylint: disable=missing-docstring
|
| 89 |
+
|
| 90 |
+
@def_function.function(autograph=autograph)
|
| 91 |
+
def computation(*computation_args):
|
| 92 |
+
return fn(*computation_args)
|
| 93 |
+
|
| 94 |
+
computation = computation.get_concrete_function(*[
|
| 95 |
+
tensor.TensorSpec(
|
| 96 |
+
dtype=x.dtype, shape=x.shape, name="batch_" + str(i))
|
| 97 |
+
for i, x in enumerate(args)
|
| 98 |
+
])
|
| 99 |
+
|
| 100 |
+
with ops.name_scope("batch") as name:
|
| 101 |
+
for a in args:
|
| 102 |
+
if not isinstance(a, tensor.Tensor):
|
| 103 |
+
raise ValueError("All arguments to functions decorated with "
|
| 104 |
+
"`batch_function` are supposed to be Tensors; "
|
| 105 |
+
f"found {a!r}.")
|
| 106 |
+
outputs = gen_batch_ops.batch_function(
|
| 107 |
+
num_batch_threads=num_batch_threads,
|
| 108 |
+
max_batch_size=max_batch_size,
|
| 109 |
+
batch_timeout_micros=batch_timeout_micros,
|
| 110 |
+
allowed_batch_sizes=allowed_batch_sizes,
|
| 111 |
+
max_enqueued_batches=max_enqueued_batches,
|
| 112 |
+
shared_name=name,
|
| 113 |
+
enable_large_batch_splitting=enable_large_batch_splitting,
|
| 114 |
+
f=computation,
|
| 115 |
+
in_tensors=list(args),
|
| 116 |
+
captured_tensors=computation.captured_inputs,
|
| 117 |
+
Tout=[o.dtype for o in computation.outputs])
|
| 118 |
+
return nest.pack_sequence_as(
|
| 119 |
+
computation.structured_outputs, outputs, expand_composites=True)
|
| 120 |
+
|
| 121 |
+
return decorated
|
| 122 |
+
|
| 123 |
+
return decorator
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/bitwise_ops.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""Operations for manipulating the binary representations of integers.
|
| 17 |
+
|
| 18 |
+
API docstring: tensorflow.bitwise
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from tensorflow.python.framework import ops
|
| 22 |
+
# go/tf-wildcard-import
|
| 23 |
+
# pylint: disable=wildcard-import
|
| 24 |
+
from tensorflow.python.ops.gen_bitwise_ops import *
|
| 25 |
+
# pylint: enable=wildcard-import
|
| 26 |
+
|
| 27 |
+
ops.NotDifferentiable("BitwiseAnd")
|
| 28 |
+
ops.NotDifferentiable("BitwiseOr")
|
| 29 |
+
ops.NotDifferentiable("BitwiseXor")
|
| 30 |
+
ops.NotDifferentiable("Invert")
|
| 31 |
+
ops.NotDifferentiable("PopulationCount")
|
| 32 |
+
ops.NotDifferentiable("LeftShift")
|
| 33 |
+
ops.NotDifferentiable("RightShift")
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/composite_tensor_ops.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# =============================================================================
|
| 15 |
+
"""Operations for ExtensionTypes (aka Composite Tensors)."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.core.protobuf import composite_tensor_variant_pb2
|
| 18 |
+
from tensorflow.python.framework import composite_tensor
|
| 19 |
+
from tensorflow.python.framework import dtypes
|
| 20 |
+
from tensorflow.python.framework import ops
|
| 21 |
+
from tensorflow.python.framework import tensor
|
| 22 |
+
from tensorflow.python.ops import gen_composite_tensor_ops
|
| 23 |
+
from tensorflow.python.saved_model import nested_structure_coder
|
| 24 |
+
from tensorflow.python.util import nest
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def composite_tensor_to_variants(value, type_spec=None, name=None):
|
| 28 |
+
"""Encodes `value` as a scalar variant tensor.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
value: The `ExtensionType` value to encode.
|
| 32 |
+
type_spec: Information about the value's type that should be included in the
|
| 33 |
+
encoding.
|
| 34 |
+
name: Optional name for the operation.
|
| 35 |
+
|
| 36 |
+
Returns:
|
| 37 |
+
A Tensor with shape=`()` and dtype=`tf.variant`.
|
| 38 |
+
|
| 39 |
+
Raises:
|
| 40 |
+
ValueError: If `type_spec` is not compatible with `value`.
|
| 41 |
+
"""
|
| 42 |
+
if not isinstance(value, composite_tensor.CompositeTensor):
|
| 43 |
+
raise TypeError("Expected `value` to be a CompositeTensor. "
|
| 44 |
+
f"Received {type(value)}.")
|
| 45 |
+
|
| 46 |
+
if type_spec is None:
|
| 47 |
+
type_spec = value._type_spec # pylint: disable=protected-access
|
| 48 |
+
if not type_spec.is_compatible_with(value):
|
| 49 |
+
raise ValueError(f"`type_spec` {type_spec} is not compatible with `value` "
|
| 50 |
+
f"{value!r}.")
|
| 51 |
+
metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()
|
| 52 |
+
metadata.type_spec_proto.CopyFrom(
|
| 53 |
+
nested_structure_coder.encode_structure(type_spec).type_spec_value)
|
| 54 |
+
|
| 55 |
+
return gen_composite_tensor_ops.CompositeTensorVariantFromComponents(
|
| 56 |
+
components=nest.flatten(value, expand_composites=True),
|
| 57 |
+
metadata=metadata.SerializeToString(),
|
| 58 |
+
name=name)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def composite_tensor_from_variant(encoded, type_spec, name=None):
|
| 62 |
+
"""Returns the `ExtensionType` value encoded by a variant scalar tensor.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
encoded: A Tensor returned by `composite_tensor_to_variants`.
|
| 66 |
+
type_spec: The `TypeSpec` of the original value. This is used to determine
|
| 67 |
+
the number and types of the component tensors that comprise the decoded
|
| 68 |
+
value. Must be compatible with the `TypeSpec` serilized in `encoded`.
|
| 69 |
+
name: Optional name for the operation.
|
| 70 |
+
|
| 71 |
+
Returns:
|
| 72 |
+
An `ExtensionType` value that is compatible with `TypeSpec`.
|
| 73 |
+
|
| 74 |
+
Raises:
|
| 75 |
+
TypeError: If `encoded` is not a Tensor with dtype=variant.
|
| 76 |
+
InvalidArgumentError: If `encoded` is not compatible with `type_spec`.
|
| 77 |
+
"""
|
| 78 |
+
if not isinstance(encoded, tensor.Tensor):
|
| 79 |
+
raise TypeError(f"Expected `encoded` to be a Tensor, got {encoded!r}.")
|
| 80 |
+
if encoded.dtype != dtypes.variant:
|
| 81 |
+
raise TypeError("Expected `encoded` to have dtype=variant, got "
|
| 82 |
+
f"{encoded!r}.")
|
| 83 |
+
encoded.shape.assert_is_compatible_with(())
|
| 84 |
+
|
| 85 |
+
metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()
|
| 86 |
+
metadata.type_spec_proto.CopyFrom(
|
| 87 |
+
nested_structure_coder.encode_structure(type_spec).type_spec_value)
|
| 88 |
+
|
| 89 |
+
component_dtypes = [
|
| 90 |
+
t.dtype for t in nest.flatten(type_spec, expand_composites=True)
|
| 91 |
+
]
|
| 92 |
+
|
| 93 |
+
components = gen_composite_tensor_ops.CompositeTensorVariantToComponents(
|
| 94 |
+
encoded=encoded,
|
| 95 |
+
metadata=metadata.SerializeToString(),
|
| 96 |
+
Tcomponents=component_dtypes,
|
| 97 |
+
name=name)
|
| 98 |
+
return nest.pack_sequence_as(type_spec, components, expand_composites=True)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@ops.RegisterGradient("CompositeTensorVariantFromComponents")
|
| 102 |
+
def _composite_tensor_to_variants_grad(op, grad):
|
| 103 |
+
return gen_composite_tensor_ops.CompositeTensorVariantToComponents(
|
| 104 |
+
encoded=grad,
|
| 105 |
+
metadata=op.get_attr("metadata"),
|
| 106 |
+
Tcomponents=op.get_attr("Tcomponents"))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@ops.RegisterGradient("CompositeTensorVariantToComponents")
|
| 110 |
+
def _composite_tensor_from_variant_grad(op, *grad):
|
| 111 |
+
assert len(grad) == len(op.outputs)
|
| 112 |
+
# `components` is `op.outputs`, but with any tensors for which we're
|
| 113 |
+
# taking the gradient replaced by the corresponding value from `grad`.
|
| 114 |
+
components = [
|
| 115 |
+
op.outputs[i] if grad[i] is None else grad[i] for i in range(len(grad))
|
| 116 |
+
]
|
| 117 |
+
return gen_composite_tensor_ops.CompositeTensorVariantFromComponents(
|
| 118 |
+
components=components, metadata=op.get_attr("metadata"))
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_assert.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Assert functions for Control Flow Operations."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.eager import context
|
| 18 |
+
from tensorflow.python.framework import dtypes
|
| 19 |
+
from tensorflow.python.framework import errors
|
| 20 |
+
from tensorflow.python.framework import ops
|
| 21 |
+
from tensorflow.python.ops import array_ops
|
| 22 |
+
from tensorflow.python.ops import cond
|
| 23 |
+
from tensorflow.python.ops import gen_control_flow_ops
|
| 24 |
+
from tensorflow.python.ops import gen_logging_ops
|
| 25 |
+
from tensorflow.python.ops import gen_math_ops
|
| 26 |
+
from tensorflow.python.util import dispatch
|
| 27 |
+
from tensorflow.python.util import tf_should_use
|
| 28 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _summarize_eager(tensor, summarize=None):
|
| 32 |
+
"""Returns a summarized string representation of eager `tensor`.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
tensor: EagerTensor to summarize
|
| 36 |
+
summarize: Include these many first elements of `array`
|
| 37 |
+
"""
|
| 38 |
+
# Emulate the behavior of Tensor::SummarizeValue()
|
| 39 |
+
if summarize is None:
|
| 40 |
+
summarize = 3
|
| 41 |
+
elif summarize < 0:
|
| 42 |
+
summarize = array_ops.size(tensor)
|
| 43 |
+
|
| 44 |
+
# reshape((-1,)) is the fastest way to get a flat array view
|
| 45 |
+
if tensor._rank(): # pylint: disable=protected-access
|
| 46 |
+
flat = tensor.numpy().reshape((-1,))
|
| 47 |
+
lst = [str(x) for x in flat[:summarize]]
|
| 48 |
+
if len(lst) < flat.size:
|
| 49 |
+
lst.append("...")
|
| 50 |
+
else:
|
| 51 |
+
# tensor.numpy() returns a scalar for zero dimensional arrays
|
| 52 |
+
if gen_math_ops.not_equal(summarize, 0):
|
| 53 |
+
lst = [str(tensor.numpy())]
|
| 54 |
+
else:
|
| 55 |
+
lst = []
|
| 56 |
+
|
| 57 |
+
return ", ".join(lst)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Assert and Print are special symbols in python, so we must
|
| 61 |
+
# use an upper-case version of them.
|
| 62 |
+
@tf_export("debugging.Assert", "Assert")
|
| 63 |
+
@dispatch.add_dispatch_support
|
| 64 |
+
@tf_should_use.should_use_result
|
| 65 |
+
def Assert(condition, data, summarize=None, name=None):
|
| 66 |
+
"""Asserts that the given condition is true.
|
| 67 |
+
|
| 68 |
+
If `condition` evaluates to false, print the list of tensors in `data`.
|
| 69 |
+
`summarize` determines how many entries of the tensors to print.
|
| 70 |
+
|
| 71 |
+
Args:
|
| 72 |
+
condition: The condition to evaluate.
|
| 73 |
+
data: The tensors to print out when condition is false.
|
| 74 |
+
summarize: Print this many entries of each tensor.
|
| 75 |
+
name: A name for this operation (optional).
|
| 76 |
+
|
| 77 |
+
Returns:
|
| 78 |
+
assert_op: An `Operation` that, when executed, raises a
|
| 79 |
+
`tf.errors.InvalidArgumentError` if `condition` is not true.
|
| 80 |
+
@compatibility(eager)
|
| 81 |
+
returns None
|
| 82 |
+
@end_compatibility
|
| 83 |
+
|
| 84 |
+
Raises:
|
| 85 |
+
@compatibility(TF1)
|
| 86 |
+
When in TF V1 mode (that is, outside `tf.function`) Assert needs a control
|
| 87 |
+
dependency on the output to ensure the assertion executes:
|
| 88 |
+
|
| 89 |
+
```python
|
| 90 |
+
# Ensure maximum element of x is smaller or equal to 1
|
| 91 |
+
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
|
| 92 |
+
with tf.control_dependencies([assert_op]):
|
| 93 |
+
... code using x ...
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
@end_compatibility
|
| 97 |
+
"""
|
| 98 |
+
if context.executing_eagerly():
|
| 99 |
+
if not condition:
|
| 100 |
+
xs = ops.convert_n_to_tensor(data)
|
| 101 |
+
data_str = [_summarize_eager(x, summarize) for x in xs]
|
| 102 |
+
raise errors.InvalidArgumentError(
|
| 103 |
+
node_def=None,
|
| 104 |
+
op=None,
|
| 105 |
+
message="Expected '%s' to be true. Summarized data: %s" %
|
| 106 |
+
(condition, "\n".join(data_str)))
|
| 107 |
+
return
|
| 108 |
+
|
| 109 |
+
with ops.name_scope(name, "Assert", [condition, data]) as name:
|
| 110 |
+
xs = ops.convert_n_to_tensor(data)
|
| 111 |
+
if all(x.dtype in {dtypes.string, dtypes.int32} for x in xs):
|
| 112 |
+
# As a simple heuristic, we assume that string and int32 are
|
| 113 |
+
# on host to avoid the need to use cond. If it is not case,
|
| 114 |
+
# we will pay the price copying the tensor to host memory.
|
| 115 |
+
return gen_logging_ops._assert(condition, data, summarize, name="Assert") # pylint: disable=protected-access
|
| 116 |
+
else:
|
| 117 |
+
condition = ops.convert_to_tensor(condition, name="Condition")
|
| 118 |
+
|
| 119 |
+
def true_assert():
|
| 120 |
+
return gen_logging_ops._assert( # pylint: disable=protected-access
|
| 121 |
+
condition, data, summarize, name="Assert")
|
| 122 |
+
|
| 123 |
+
guarded_assert = cond.cond(
|
| 124 |
+
condition,
|
| 125 |
+
gen_control_flow_ops.no_op,
|
| 126 |
+
true_assert,
|
| 127 |
+
name="AssertGuard")
|
| 128 |
+
if context.executing_eagerly():
|
| 129 |
+
return
|
| 130 |
+
return guarded_assert.op
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_ops.py
ADDED
|
@@ -0,0 +1,2256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Control Flow Operations.
|
| 16 |
+
|
| 17 |
+
See the [autograph](https://www.tensorflow.org/guide/autograph) guide.
|
| 18 |
+
"""
|
| 19 |
+
# pylint: disable=g-bad-name
|
| 20 |
+
import abc
|
| 21 |
+
|
| 22 |
+
from tensorflow.core.framework import attr_value_pb2
|
| 23 |
+
from tensorflow.core.protobuf import control_flow_pb2
|
| 24 |
+
from tensorflow.python.eager import context
|
| 25 |
+
from tensorflow.python.framework import composite_tensor
|
| 26 |
+
from tensorflow.python.framework import constant_op
|
| 27 |
+
from tensorflow.python.framework import dtypes
|
| 28 |
+
from tensorflow.python.framework import indexed_slices
|
| 29 |
+
from tensorflow.python.framework import ops
|
| 30 |
+
from tensorflow.python.framework import tensor as tensor_lib
|
| 31 |
+
from tensorflow.python.framework import tensor_shape
|
| 32 |
+
from tensorflow.python.framework import tensor_util
|
| 33 |
+
from tensorflow.python.framework import type_spec
|
| 34 |
+
from tensorflow.python.ops import array_ops
|
| 35 |
+
from tensorflow.python.ops import control_flow_util as util
|
| 36 |
+
from tensorflow.python.ops import gen_array_ops
|
| 37 |
+
from tensorflow.python.ops import gen_control_flow_ops
|
| 38 |
+
from tensorflow.python.ops import math_ops
|
| 39 |
+
from tensorflow.python.ops import tensor_array_ops
|
| 40 |
+
# go/tf-wildcard-import
|
| 41 |
+
# pylint: disable=wildcard-import,undefined-variable
|
| 42 |
+
from tensorflow.python.ops.gen_control_flow_ops import *
|
| 43 |
+
# pylint: enable=wildcard-import
|
| 44 |
+
from tensorflow.python.util import compat
|
| 45 |
+
from tensorflow.python.util import dispatch
|
| 46 |
+
from tensorflow.python.util import nest
|
| 47 |
+
from tensorflow.python.util import variable_utils
|
| 48 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# We override the 'tuple' for a control flow op, so we keep python's
|
| 52 |
+
# existing 'tuple' for later use in this module.
|
| 53 |
+
_basetuple = tuple
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# pylint: disable=protected-access
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _Identity(tensor, name=None):
  """Return a tensor with the same shape and contents as the input tensor.

  Args:
    tensor: A Tensor.
    name: A name for this operation (optional).

  Returns:
    A Tensor with the same type and value as the input Tensor.
  """
  tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
  # TODO(b/246438937): Remove this when we expand ResourceVariables into
  # dt_resource tensors.
  tensor = variable_utils.convert_variables_to_tensors(tensor)
  if isinstance(tensor, composite_tensor.CompositeTensor):
    # Apply the identity to every flattened component and repack.
    return nest.map_structure(_Identity, tensor, expand_composites=True)
  if not isinstance(tensor, tensor_lib.Tensor):
    raise TypeError("'tensor' must be a Tensor or CompositeTensor. "
                    f"Received: {type(tensor)}.")
  # Ref-typed tensors need the ref-preserving identity kernel.
  if tensor.dtype._is_ref_dtype:  # pylint: disable=protected-access
    return gen_array_ops.ref_identity(tensor, name=name)
  return array_ops.identity(tensor, name=name)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _NextIteration(tensor, name=None):
  """Wraps `tensor` in a NextIteration op, recursing into composites."""
  tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
  if isinstance(tensor, composite_tensor.CompositeTensor):
    return nest.map_structure(_NextIteration, tensor, expand_composites=True)
  if not isinstance(tensor, tensor_lib.Tensor):
    raise TypeError("'tensor' must be a Tensor or CompositeTensor. "
                    f"Received: {type(tensor)}.")
  # Ref-typed tensors get the ref-preserving variant of the op.
  make_op = (
      ref_next_iteration
      if tensor.dtype._is_ref_dtype  # pylint: disable=protected-access
      else next_iteration)
  return make_op(tensor, name=name)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _Enter(tensor,
           frame_name,
           is_constant=False,
           parallel_iterations=10,
           use_ref=True,
           use_input_shape=True,
           name=None):
  """Creates or finds a child frame, and makes `tensor` available to it.

  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `tensor` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations`
  iterations are run in parallel in the child frame.

  Args:
    tensor: The tensor to be made available to the child frame.
    frame_name: The name of the child frame.
    is_constant: If true, the output is constant within the child frame.
    parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if tensor is of ref type.
    use_input_shape: If true, set the result's shape based on tensor's shape.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `tensor`.

  Raises:
    ValueError: If any tensor in `tensor` has a less specific shape
      than its corresponding shape in `shape_invariant`.
  """
  # NOTE(review): the Raises section above mentions `shape_invariant`, which
  # is not a parameter of this function; presumably the error originates in a
  # caller that supplies shape invariants -- confirm.
  tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
  if isinstance(tensor, tensor_lib.Tensor):
    # A ref-typed tensor keeps its ref-ness only when `use_ref` allows it.
    if tensor.dtype._is_ref_dtype and use_ref:  # pylint: disable=protected-access
      result = gen_control_flow_ops.ref_enter(
          tensor, frame_name, is_constant, parallel_iterations, name=name)
    else:
      result = gen_control_flow_ops.enter(
          tensor, frame_name, is_constant, parallel_iterations, name=name)
    if use_input_shape:
      # Propagate the input's static shape onto the Enter result.
      result.set_shape(tensor.get_shape())
    return result
  elif isinstance(tensor, composite_tensor.CompositeTensor):

    def enter_component(t):
      # Recurse on each flattened component. `name` is not forwarded here,
      # so components get default op names.
      return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref,
                    use_input_shape)

    return nest.map_structure(enter_component, tensor, expand_composites=True)
  else:
    raise TypeError("'tensor' must be a Tensor or CompositeTensor. "
                    f"Received: {type(tensor)}.")
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def exit(tensor, name=None):  # pylint: disable=redefined-builtin
  """Exits the current frame to its parent frame.

  Exit makes its input `tensor` available to the parent frame.

  Args:
    tensor: The tensor to be made available to the parent frame.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `tensor`.
  """
  tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
  if isinstance(tensor, composite_tensor.CompositeTensor):
    # Exit each flattened component and repack the structure.
    return nest.map_structure(exit, tensor, expand_composites=True)
  if not isinstance(tensor, tensor_lib.Tensor):
    raise TypeError("'tensor' must be a Tensor or CompositeTensor. "
                    f"Received: {type(tensor)}.")
  # Ref-typed tensors need the ref-preserving Exit kernel.
  if tensor.dtype._is_ref_dtype:  # pylint: disable=protected-access
    return gen_control_flow_ops.ref_exit(tensor, name)
  return gen_control_flow_ops._exit(tensor, name)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def switch(data, pred, dtype=None, name=None):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded
    to `output_true`, otherwise it goes to `output_false`.
  """
  with ops.name_scope(name, "Switch", [data, pred]) as name:
    data = ops.internal_convert_to_tensor_or_composite(
        data, dtype=dtype, name="data", as_ref=True)
    pred = ops.convert_to_tensor(pred, name="pred")
    if isinstance(data, tensor_lib.Tensor):
      return gen_control_flow_ops.switch(data, pred, name=name)
    if not isinstance(data, composite_tensor.CompositeTensor):
      raise TypeError(
          "'data' must be a Tensor or CompositeTensor. "
          f"Received: {type(data)}.")
    # Switch every flattened component individually, then repack the false
    # and true branches into structures matching `data`.
    false_parts = []
    true_parts = []
    for component in nest.flatten(data, expand_composites=True):
      out_false, out_true = gen_control_flow_ops.switch(component, pred)
      false_parts.append(out_false)
      true_parts.append(out_true)
    return (nest.pack_sequence_as(data, false_parts, expand_composites=True),
            nest.pack_sequence_as(data, true_parts, expand_composites=True))
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _SwitchRefOrTensor(data, pred, name="Switch"):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.

  Raises:
    TypeError: if data is not a Tensor or IndexedSlices
  """
  data = ops.convert_to_tensor_or_composite(data, name="data")
  # NOTE(vrv): `ignore_existing=True` below handles the case where `data` is
  # captured inside an outer `ops.colocate_with(var)` block (e.g. an update op
  # built by Optimizer.apply_gradients inside a cond branch):
  #
  #   with ops.colocate_with(var):
  #     with ops.colocate_with(data):
  #       op = ...
  #
  # `var` and `data` may be pinned to different devices, so ops created within
  # ops.colocate_with(data) must ignore the existing colocation stack.
  with ops.colocate_with(data, ignore_existing=True):
    needs_ref_switch = (
        isinstance(data, tensor_lib.Tensor) and
        data.dtype._is_ref_dtype)  # pylint: disable=protected-access
    if needs_ref_switch:
      return ref_switch(data, pred, name=name)
    return switch(data, pred, name=name)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def merge(inputs, name=None):
  """Returns the value of an available element of `inputs`.

  Each tensor in `inputs` is tested in turn for availability; the first
  available one is returned together with its index in `inputs`.

  It is an error if more than one tensor in `inputs` is available. If no tensor
  in `inputs` is available, the returned tensor and index are not set.

  Both `Tensor`s and `IndexedSlices` are supported. When `inputs` mixes the
  two, every input is converted to `IndexedSlices` before merging.

  Args:
    inputs: The input tensors, at most one of which is available.
    name: A name for this operation (optional).

  Returns:
    A tuple containing the chosen input tensor and its index in `inputs`.

  Raises:
    ValueError: If any of the inputs is None, or inputs are IndexedSlices and
      some but not all have a dense_shape property.
  """
  if any(inp is None for inp in inputs):
    raise ValueError("At least one of the merge inputs is None: %s" % inputs)
  with ops.name_scope(name, "Merge", inputs) as name:
    inputs = [
        ops.internal_convert_to_tensor_or_composite(inp, as_ref=True)
        for inp in inputs
    ]
    if all(isinstance(inp, tensor_lib.Tensor) for inp in inputs):
      # All-dense case: a single Merge (or RefMerge, when every input is
      # ref-typed) op handles it.
      if all(inp.dtype._is_ref_dtype for inp in inputs):  # pylint: disable=protected-access
        return gen_control_flow_ops.ref_merge(inputs, name)
      return gen_control_flow_ops.merge(inputs, name)
    # A mix of Tensors and IndexedSlices is normalized to IndexedSlices.
    if all(
        isinstance(inp, (indexed_slices.IndexedSlices, tensor_lib.Tensor))
        for inp in inputs):
      inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)

    for inp in inputs:
      if not isinstance(inp, composite_tensor.CompositeTensor):
        raise TypeError("Type %s not supported" % type(inp))

    # All composites must share one structure so they can be merged
    # component-wise.
    for inp in inputs[1:]:
      nest.assert_same_structure(inputs[0], inp, expand_composites=True)

    flattened = [nest.flatten(inp, expand_composites=True) for inp in inputs]
    merged_pairs = [
        gen_control_flow_ops.merge(component)
        for component in zip(*flattened)
    ]
    merged_components = [value for (value, _) in merged_pairs]
    # Every component merge chooses the same branch; report the first index.
    value_index = merged_pairs[0][1]
    merged_value = nest.pack_sequence_as(
        inputs[0], merged_components, expand_composites=True)
    return (merged_value, value_index)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def _convert_tensorarray_to_flow(tensor_or_tensor_array):
  """Substitutes a TensorArray with its flow tensor; passes others through."""
  if not isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray):
    return tensor_or_tensor_array
  return tensor_or_tensor_array.flow
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def _convert_flow_to_tensorarray(tensor_or_tensor_array, tensor_or_flow):
  """Rebuilds a TensorArray around a new flow, or passes the value through."""
  if not isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray):
    return tensor_or_flow
  return tensor_array_ops.build_ta_with_new_flow(tensor_or_tensor_array,
                                                 tensor_or_flow)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def _convert_to_tensor_or_composite_or_tensorarray(var):
  """Converts `var`, except that TensorArrays are passed through unchanged."""
  if not isinstance(var, tensor_array_ops.TensorArray):
    return ops.convert_to_tensor_or_composite(var)
  return var
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
# TODO(xjun): replace this with is_subtype_of after it is landed.
|
| 344 |
+
def _ShapeLessThanOrEqual(shape1, shape2):
|
| 345 |
+
if shape2.dims is None:
|
| 346 |
+
return True
|
| 347 |
+
if shape1.ndims != shape2.ndims:
|
| 348 |
+
return False
|
| 349 |
+
for dim1, dim2 in zip(shape1.dims, shape2.dims):
|
| 350 |
+
if dim2.value is not None and dim1.value != dim2.value:
|
| 351 |
+
return False
|
| 352 |
+
return True
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def _shape_invariant_to_type_spec(var, shape=None):
  """Converts a shape invariant to a TypeSpec.

  If `var` is a TensorArray, it will first be converted to its flow.

  Args:
    var: The tensor, tensor array or composite tensor whose shape is described
      by the shape invariant.
    shape: A `TypeSpec` or `TensorShape`. If `shape` is already a `TypeSpec`,
      then it is simply returned as-is.

  Returns:
    A `TypeSpec` for `var`, consistent with the given shape.

  Raises:
    TypeError: If `shape` is a TypeSpec and not compatible with `var`.
    TypeError: If `shape` is not None, a TypeSpec, or a TensorShape.
    TypeError: If `shape` is a TensorShape, `var` is a CompositeTensor, and
      `var` doesn't implement the `_shape_invariant_to_type_spec` method.
  """
  var = _convert_tensorarray_to_flow(var)
  # No invariant given: infer the spec directly from the value.
  if shape is None:
    return type_spec.type_spec_from_value(var)
  # A TypeSpec invariant is used as-is, after a compatibility check.
  if isinstance(shape, type_spec.TypeSpec):
    if not shape.is_compatible_with(var):
      raise TypeError("TypeSpec %r is not compatible with %r" % (shape, var))
    return shape
  if not isinstance(shape, tensor_shape.TensorShape):
    raise TypeError(
        "'shape' must be one of TypeSpec, TensorShape or None. "
        f"Received: {type(shape)}")
  # TensorShape invariant on a plain Tensor: build a TensorSpec.
  if isinstance(var, tensor_lib.Tensor):
    return tensor_lib.TensorSpec(shape, var.dtype)
  # Composite tensors must translate the TensorShape themselves.
  try:
    return var._shape_invariant_to_type_spec(shape)  # pylint: disable=protected-access
  except NotImplementedError as e:
    raise TypeError(
        f"To describe or constrain a {type(var).__name__}, use a "
        f"{type(var._type_spec).__name__} instead of a TensorShape.") from e  # pylint: disable=protected-access
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def _EnforceShapeInvariant(merge_var, next_var):
  """Check if the shapes of the loops variables are invariants.

  Args:
    merge_var: The tensor representing the initial values of the loop
      variables.
    next_var: The tensor representing the values of the loop variables
      after one loop iteration.

  Raises:
    ValueError: If any tensor in `merge_var` has a more specific shape than
      its corresponding tensor in `next_var`.
  """
  if not isinstance(merge_var, tensor_lib.Tensor):
    raise TypeError("'merge_var' must be a Tensor. "
                    f"Received: {type(merge_var)}.")
  m_shape = merge_var.get_shape()
  n_shape = next_var.get_shape()
  if _ShapeLessThanOrEqual(n_shape, m_shape):
    return
  # The shape after one iteration is more specific than the merged shape:
  # report the loop's original input tensor in the error message.
  enter = merge_var.op.inputs[0].op
  assert util.IsLoopEnter(enter)
  input_t = enter.inputs[0]
  raise ValueError(
      "Input tensor '%s' enters the loop with shape %s, but has shape %s "
      "after one iteration. To allow the shape to vary across iterations, "
      "use the `shape_invariants` argument of tf.while_loop to specify a "
      "less-specific shape." % (input_t.name, input_t.shape, n_shape))
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True):
  """Add NextIteration and back edge from v to m.

  Wraps `v` in a NextIteration op and rewires it into input 1 of `m`'s op.
  NOTE(review): input index 1 is presumably the back-edge slot of a Merge
  op -- confirm with callers.

  Args:
    m: A Tensor or CompositeTensor whose op receives the back edge.
    v: The value produced after one loop iteration, fed back into `m`.
    enforce_shape_invariant: If True (Tensor case only), verify `v`'s shape
      against `m`'s before rewiring.

  Returns:
    The NextIteration-wrapped `v` in the Tensor case; in the composite case,
    the structure returned by `nest.map_structure` over `update_component`.

  Raises:
    TypeError: If `m` is neither a Tensor nor a CompositeTensor.
  """
  if isinstance(m, tensor_lib.Tensor):
    v = ops.convert_to_tensor(v)
    v = _NextIteration(v)
    if enforce_shape_invariant:
      # Make sure the shapes of loop outputs are correct. We do this before
      # calling _update_input, which will raise a less-helpful error message if
      # the types don't match.
      # TODO(skyewm): call this for other cases below (needs testing)
      _EnforceShapeInvariant(m, v)
    m.op._update_input(1, v)  # pylint: disable=protected-access
  elif isinstance(m, composite_tensor.CompositeTensor):
    # pylint: disable=protected-access
    def update_component(m_component, v_component):
      # Rewire one flattened component's back edge.
      m_component.op._update_input(1, v_component)

    if isinstance(m, indexed_slices.IndexedSlices):
      # Normalize v to IndexedSlices so its components line up with m's.
      v = math_ops._as_indexed_slices(v, optimize=False)
    # pylint: enable=protected-access
    v = _NextIteration(v)
    return nest.map_structure(update_component, m, v, expand_composites=True)
  else:
    raise TypeError("'m' must be a Tensor or CompositeTensor. "
                    f"Received: {type(m)}.")
  return v
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
class ControlFlowContext(metaclass=abc.ABCMeta):
  """The base class for control flow context.

  The usage pattern is a sequence of (Enter, Exit) followed by a final
  ExitResult.

  We maintain the following state for control flow contexts during graph
  construction:
   1. graph has _control_flow_context: the current context used to
      construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
   2. op has _control_flow_context: the context to which the op belongs.
      Set at the time the op is created. Immutable.
   3. A ControlFlowContext has _outer_context: the context in which this
      context is created. Set at the time a context is created. Immutable.
   4. A ControlFlowContext has _context_stack.
      Pushed and popped by ctxt.Enter() and ctxt.Exit()
  """

  def __init__(self, values_def=None, import_scope=None):
    # Register this context as a child of whatever context is current on the
    # default graph at construction time.
    self._nested_contexts = []
    self._outer_context = ops.get_default_graph()._get_control_flow_context()
    if self._outer_context:
      self._outer_context._nested_contexts.append(self)  # pylint: disable=protected-access
    # Stack of contexts saved/restored by Enter()/Exit(); supports re-entrancy.
    self._context_stack = []
    if values_def:
      self._init_values_from_proto(values_def, import_scope=import_scope)
    else:
      # The names of tensors that have been already seen in this context.
      self._values = set()
      # The keys are the names of tensors referenced by but external to this
      # context. Each value is the Tensor that should be used by this context to
      # access the key value (e.g. a switch output guarding a cond input value).
      self._external_values = {}

  def _init_values_from_proto(self, values_def, import_scope=None):
    """Initializes values and external_values from `ValuesDef` protocol buffer.

    Args:
      values_def: `ValuesDef` protocol buffer.
      import_scope: Optional `string`. Name scope to add.
    """
    assert isinstance(values_def, control_flow_pb2.ValuesDef)
    self._values = set(
        ops.prepend_name_scope(value, import_scope)
        for value in values_def.values)
    g = ops.get_default_graph()
    self._external_values = {}
    for k, v in values_def.external_values.items():
      k = ops.prepend_name_scope(k, import_scope)
      self._external_values[k] = g.as_graph_element(
          ops.prepend_name_scope(v, import_scope))
    # Ops producing internal (non-external) values belong to this context;
    # tensor names are "op:index", so strip the output index to get op names.
    op_names = set([
        op.split(":")[0]
        for op in self._values - set(self._external_values.keys())
    ])
    for op in op_names:
      # pylint: disable=protected-access
      g.as_graph_element(op)._set_control_flow_context(self)
      # pylint: enable=protected-access

  @property
  def name(self):
    # NOTE: `_name` is set by subclasses (e.g. from unique_name or a proto).
    return self._name

  @property
  def outer_context(self):
    """Return the context containing this context."""
    return self._outer_context

  @property
  def grad_state(self):
    raise NotImplementedError("Abstract method")

  @property
  def back_prop(self):
    raise NotImplementedError("Abstract method")

  @abc.abstractmethod
  def to_control_flow_context_def(self, context_def, export_scope=None):
    """Serializes this into `context_def`.

    Args:
      context_def: a `ControlFlowContextDef` protocol buffer.
      export_scope: Optional `string`. Name scope to remove.
    """
    raise NotImplementedError("Abstract method")

  def _to_values_def(self, export_scope=None):
    """Converts the values to a `ValuesDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `ValuesDef` protocol buffer.
    """
    values_def = control_flow_pb2.ValuesDef()
    # Sort for deterministic serialization.
    values_def.values.extend(
        [ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])
    for k, v in self._external_values.items():
      k = ops.strip_name_scope(k, export_scope)
      values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)
    return values_def

  def AddName(self, name):
    """Marks the tensor `name` as seen in this context."""
    self._values.add(name)

  # pylint: disable=protected-access
  def Enter(self):
    """Enter this control flow context."""
    graph = ops.get_default_graph()
    # Save the current context so Exit() can restore it.
    self._context_stack.append(graph._get_control_flow_context())
    graph._set_control_flow_context(self)

  def Exit(self):
    """Exit this control flow context."""
    graph = ops.get_default_graph()
    last_context = self._context_stack.pop()
    graph._set_control_flow_context(last_context)

  def EnterGradientColocation(self, op: ops.Operation, gradient_uid):
    """Start building a gradient colocated with an op."""
    # Delegates outward; only contexts that care (e.g. XLA) override this.
    if self._outer_context:
      self._outer_context.EnterGradientColocation(op, gradient_uid)

  def ExitGradientColocation(self, op: ops.Operation, gradient_uid):
    """Start building a gradient colocated with an op."""
    if self._outer_context:
      self._outer_context.ExitGradientColocation(op, gradient_uid)

  def ExitResult(self, result):
    """Make a list of tensors available in the outer context."""
    if self._outer_context:
      def fn(x):
        self._outer_context.AddName(x.name)
        return x
      nest.map_structure(fn, result, expand_composites=True)

  def GetWhileContext(self):
    """Return the while context containing this context."""
    if self._outer_context:
      return self._outer_context.GetWhileContext()
    return None

  def _RemoveExternalControlEdges(self, op: ops.Operation):
    """Remove any external control dependency on this op.

    Returns:
      A pair (internal_control_inputs, external_control_inputs) partitioning
      `op`'s original control inputs.
    """
    while_ctxt = self.GetWhileContext()
    # A control input of `op` is internal if it is in the same while
    # loop context as the enclosing while loop context of self.
    if while_ctxt is None:
      internal_control_inputs, external_control_inputs = op.control_inputs, []
    else:
      internal_control_inputs, external_control_inputs = [], []
      for x in op.control_inputs:
        ctxt = util.GetOutputContext(x)
        if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
          internal_control_inputs.append(x)
        else:
          external_control_inputs.append(x)
    if len(internal_control_inputs) != len(op.control_inputs):
      # TODO(mdan): perhaps there should be a replace_control_inputs()
      op._remove_all_control_inputs()
      op._add_control_inputs(internal_control_inputs)
    return internal_control_inputs, external_control_inputs

  # pylint: enable=protected-access

  def AddInnerOp(self, op: ops.Operation):
    """Notifies a scope about an operator added to an inner scope."""
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def GetControlPivot(self):
    """Returns the pivot node for this context, or None."""
    return None

  def IsWhileContext(self):
    return False

  def IsCondContext(self):
    return False

  def IsXLAContext(self):
    return False

  def __str__(self):
    return self.name
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
class CondContext(ControlFlowContext):
  """The context for the conditional construct."""

  def __init__(self,
               pred=None,
               pivot=None,
               branch=None,
               name="cond_text",
               context_def=None,
               import_scope=None):
    """Creates a `CondContext`.

    Args:
      pred: The `boolean` tensor for the conditional predicate.
      pivot: The predicate tensor in this branch.
      branch: 0 or 1 representing this branch.
      name: Name of the `CondContext` python object.
      context_def: Optional `ContextDef` protocol buffer to initialize the
        `CondContext` object from.
      import_scope: Optional `string`. Name scope to add. Only used when
        initialing from protocol buffer.
    """
    self._name = ops.get_default_graph().unique_name(name)

    if context_def:
      self._init_from_proto(context_def, import_scope=import_scope)
    else:
      # Initializes the default fields.
      ControlFlowContext.__init__(self)
      self._pred = pred  # The boolean tensor for the cond predicate
      self._pivot = pivot  # The predicate tensor in this branch
      self._branch = branch  # 0 or 1 representing this branch

      # Values considered to have been already seen in this context. pred is not
      # included in this context.
      self._values.add(pred.name)
      self._external_values[pred.name] = pred
      self._values.add(pivot.name)
      pivot.op._set_control_flow_context(self)  # pylint: disable=protected-access

  def _init_from_proto(self, context_def, import_scope=None):
    """Creates a new `CondContext` from protocol buffer.

    Args:
      context_def: `CondContextDef` protocol buffer.
      import_scope: Optional `string`. Name scope to add.
    """
    assert isinstance(context_def, control_flow_pb2.CondContextDef)
    # Create from context_def.
    g = ops.get_default_graph()
    self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
    self._pred = g.as_graph_element(
        ops.prepend_name_scope(context_def.pred_name, import_scope))
    self._pivot = g.as_graph_element(
        ops.prepend_name_scope(context_def.pivot_name, import_scope))
    self._branch = context_def.branch
    super(CondContext, self).__init__(
        values_def=context_def.values_def, import_scope=import_scope)

  @property
  def pred(self):
    """The boolean tensor for the cond predicate."""
    return self._pred

  @property
  def pivot(self):
    """The predicate tensor in this branch."""
    return self._pivot

  @property
  def branch(self):
    """0 or 1 representing this branch."""
    return self._branch

  @property
  def grad_state(self):
    # Inherit the gradient loop state from an enclosing while loop, if any.
    if self.GetWhileContext():
      return self.GetWhileContext().grad_state
    return None

  @property
  def back_prop(self):
    # A cond has backprop only if an enclosing while loop does.
    if self.GetWhileContext():
      return self.GetWhileContext().back_prop
    return False

  def GetControlPivot(self):
    return self._pivot

  def to_proto(self, export_scope=None):
    """Converts a `CondContext` to a `CondContextDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `CondContextDef` protocol buffer.
    """
    if (export_scope is None or self.name.startswith(export_scope)):
      context_def = control_flow_pb2.CondContextDef()
      context_def.context_name = ops.strip_name_scope(self.name, export_scope)
      context_def.pred_name = ops.strip_name_scope(self._pred.name,
                                                   export_scope)
      context_def.pivot_name = ops.strip_name_scope(self._pivot.name,
                                                    export_scope)
      context_def.branch = self._branch
      context_def.values_def.MergeFrom(
          super(CondContext, self)._to_values_def(export_scope))
      for nested in self._nested_contexts:
        nested_def = context_def.nested_contexts.add()
        nested.to_control_flow_context_def(nested_def)

      return context_def
    else:
      return None

  @staticmethod
  def from_proto(context_def, import_scope=None):
    """Returns a `CondContext` object created from `context_def`."""
    ret = CondContext(context_def=context_def, import_scope=import_scope)

    # Recreate nested contexts while this context is current.
    ret.Enter()
    for nested_def in context_def.nested_contexts:
      from_control_flow_context_def(nested_def, import_scope=import_scope)
    ret.Exit()
    return ret

  def to_control_flow_context_def(self, context_def, export_scope=None):
    context_def.cond_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))

  def AddValue(self, val):
    """Add `val` to the current context and its outer context recursively."""
    if val.name in self._values:
      # Use the real value if it comes from outer context. This is needed in
      # particular for nested conds.
      result = self._external_values.get(val.name)
      result = val if result is None else result
    else:
      result = val
      self._values.add(val.name)
      if self._outer_context:
        result = self._outer_context.AddValue(val)
        self._values.add(result.name)
        self._external_values[result.name] = result
      # Build the Switch outside any existing control dependencies.
      with ops.control_dependencies(None):
        result = _SwitchRefOrTensor(result, self._pred)[self._branch]
        if self._outer_context:
          self._outer_context.AddInnerOp(result.op)

      result.op.graph.prevent_fetching(result.op)
      # pylint: disable=protected-access
      result.op._set_control_flow_context(self)
      # pylint: enable=protected-access

      # Mark Switch output as seen by this context and any outer contexts,
      # just like what we do for normal op outputs in _AddOpInternal() below.
      ctxt = self
      while ctxt is not None:
        # pylint: disable=protected-access
        ctxt._values.add(result.name)
        ctxt = ctxt._outer_context
        # pylint: enable=protected-access

      self._external_values[val.name] = result
    return result

  def AddOp(self, op: ops.Operation):
    self._AddOpInternal(op)

  def _AddOpInternal(self, op: ops.Operation):
    """Add `op` to the current context."""
    if not op.inputs:
      # If we're in a while loop, remove any control inputs from outside the
      # loop.
      self._RemoveExternalControlEdges(op)

      if not any(
          util.OpInContext(input_op, self) for input_op in op.control_inputs):
        # pylint: disable=protected-access
        op._add_control_input(self._pivot.op)
        # pylint: enable=protected-access
    else:
      # Make each input to 'op' available in this CondContext. If an input is
      # already part of this context there's nothing to do, but if it's
      # external, AddValue() will handle adding the appropriate Switch node and
      # other bookkeeping.
      for index in range(len(op.inputs)):
        x = op.inputs[index]
        if op.type == "Merge" and x.op.type == "NextIteration":
          # Edge case: if we're importing a while loop inside this CondContext,
          # AddValue() will not correctly handle the NextIteration inputs to
          # Merge node. The problem is that the NextIteration should also be
          # part of this context, but if we're importing it won't have been
          # processed and added to the context yet, so AddValue() will try to
          # add a Switch which results in an invalid graph. Instead, we use the
          # NextIteration input as-is here, and it will eventually be added to
          # the context via AddOp().
          real_x = x
        else:
          real_x = self.AddValue(x)
        if real_x != x:
          # pylint: disable=protected-access
          op._update_input(index, real_x)
          # pylint: enable=protected-access
      # Remove any external control dependency on this op.
      self._RemoveExternalControlEdges(op)
      # pylint: disable=protected-access
      if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
        op._add_control_input(self._pivot.op)
      # pylint: enable=protected-access

    # Mark op's outputs as seen by this context and any outer contexts.
    output_names = [x.name for x in op.outputs]
    ctxt = self
    while ctxt is not None:
      # pylint: disable=protected-access
      ctxt._values.update(output_names)
      ctxt = ctxt._outer_context
      # pylint: enable=protected-access

    if self._outer_context or not util.IsLoopExit(op):
      op.graph.prevent_fetching(op)

    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def _ProcessOutputTensor(self, val):
    """Process an output tensor of a conditional branch."""
    real_val = val
    if val.name not in self._values:
      # Handle the special case of lambda: x
      self._values.add(val.name)
      if self._outer_context:
        real_val = self._outer_context.AddValue(val)
        self._values.add(real_val.name)
        self._external_values[real_val.name] = real_val
      real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
      self._external_values[val.name] = real_val
    else:
      external_val = self._external_values.get(val.name)
      if external_val is not None:
        real_val = external_val
    return real_val

  def _BuildCondTensor(self, v):
    if isinstance(v, ops.Operation):
      # Use pivot as the proxy for this op.
      return with_dependencies([v], self._pivot)
    else:
      v = nest.map_structure(
          _convert_tensorarray_to_flow, v, expand_composites=True)
      return self._ProcessOutputTensor(ops.convert_to_tensor(v))

  def BuildCondBranch(self, fn):
    """Add the subgraph defined by fn() to the graph.

    Returns:
      A pair (original_result, result) where `original_result` is fn()'s raw
      return value and `result` is a flat list of processed branch outputs.
    """
    # Snapshot the summary collection so summaries created inside fn() can be
    # re-attached as control dependencies of the branch outputs.
    pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
    original_result = fn()
    post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
    if len(post_summaries) > len(pre_summaries):
      new_summaries = post_summaries[len(pre_summaries):]
      summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
      summary_ref[:] = pre_summaries
      with ops.control_dependencies(new_summaries):
        if original_result is None:
          return no_op(), None
        elif not isinstance(original_result, ops.Operation):
          original_result = variable_utils.convert_variables_to_tensors(
              original_result)
          original_result = nest.map_structure(
              array_ops.identity, original_result, expand_composites=True)
    if original_result is None:
      return None, None

    original_result = variable_utils.convert_variables_to_tensors(
        original_result)
    result = nest.map_structure(
        self._BuildCondTensor, original_result, expand_composites=True)
    if not isinstance(result, (list, _basetuple)):
      result = [result]
    return original_result, result

  def IsCondContext(self):
    return True
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
# pylint: enable=g-doc-args
|
| 928 |
+
# pylint: enable=redefined-outer-name
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
def _resource_safe_shape(t):
  """Returns the shape of `t`, or of the variable a resource handle points to.

  For a resource-dtype tensor, follows the chain of producing ops back
  through each op's first input to the origin op and returns the static
  shape recorded in that op's "shape" attribute. For any other tensor,
  returns its dynamic (unoptimized) shape tensor.
  """
  if t.dtype != dtypes.resource:
    return array_ops.shape_internal(t, optimize=False)
  source = t
  while source.op.inputs:
    source = source.op.inputs[0]
  return tensor_shape.TensorShape(source.op.get_attr("shape"))
|
| 938 |
+
|
| 939 |
+
|
| 940 |
+
# TODO(yuanbyu): Consider having a unified notion of context for
|
| 941 |
+
# not only conditionals and loops but also control dependency and
|
| 942 |
+
# subgraphs.
|
| 943 |
+
class WhileContext(ControlFlowContext):
|
| 944 |
+
"""The context for the loop construct."""
|
| 945 |
+
|
| 946 |
+
def __init__(self,
|
| 947 |
+
maximum_iterations=None,
|
| 948 |
+
parallel_iterations=10,
|
| 949 |
+
back_prop=True,
|
| 950 |
+
swap_memory=False,
|
| 951 |
+
name="while_context",
|
| 952 |
+
grad_state=None,
|
| 953 |
+
context_def=None,
|
| 954 |
+
import_scope=None):
|
| 955 |
+
""""Creates a `WhileContext`.
|
| 956 |
+
|
| 957 |
+
Args:
|
| 958 |
+
maximum_iterations: Optional upper bound on number of loop iterations.
|
| 959 |
+
parallel_iterations: The number of iterations allowed to run in parallel.
|
| 960 |
+
back_prop: Whether backprop is enabled for this while loop.
|
| 961 |
+
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
|
| 962 |
+
name: Optional name prefix for the returned tensors.
|
| 963 |
+
grad_state: The gradient loop state.
|
| 964 |
+
context_def: Optional `WhileContextDef` protocol buffer to initialize the
|
| 965 |
+
`Whilecontext` python object from.
|
| 966 |
+
import_scope: Optional `string`. Name scope to add. Only used when
|
| 967 |
+
initialing from protocol buffer.
|
| 968 |
+
"""
|
| 969 |
+
if context_def:
|
| 970 |
+
self._init_from_proto(context_def, import_scope=import_scope)
|
| 971 |
+
else:
|
| 972 |
+
ControlFlowContext.__init__(self)
|
| 973 |
+
self._init_from_args(maximum_iterations, parallel_iterations, back_prop,
|
| 974 |
+
swap_memory, name)
|
| 975 |
+
# The gradient loop state.
|
| 976 |
+
self._grad_state = grad_state
|
| 977 |
+
|
| 978 |
+
def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop,
|
| 979 |
+
swap_memory, name):
|
| 980 |
+
"""Creates a new `WhileContext` from arguments.
|
| 981 |
+
|
| 982 |
+
Args:
|
| 983 |
+
maximum_iterations: Optional upper bound on number of loop iterations.
|
| 984 |
+
parallel_iterations: The number of iterations allowed to run in parallel.
|
| 985 |
+
back_prop: Whether backprop is enabled for this while loop.
|
| 986 |
+
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
|
| 987 |
+
name: Optional name prefix for the returned tensors.
|
| 988 |
+
|
| 989 |
+
Raises:
|
| 990 |
+
ValueError: If `parallel_iterations` has invalid value.
|
| 991 |
+
"""
|
| 992 |
+
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
|
| 993 |
+
raise ValueError("'parallel_iterations' must be a positive integer: "
|
| 994 |
+
"%s" % parallel_iterations)
|
| 995 |
+
self._name = ops.get_default_graph().unique_name(name)
|
| 996 |
+
self._maximum_iterations = maximum_iterations
|
| 997 |
+
self._parallel_iterations = parallel_iterations
|
| 998 |
+
self._back_prop = back_prop
|
| 999 |
+
self._swap_memory = swap_memory
|
| 1000 |
+
# We use this node to control constants created by the pred lambda.
|
| 1001 |
+
self._pivot_for_pred = None
|
| 1002 |
+
# We use this node to control constants created by the body lambda.
|
| 1003 |
+
self._pivot_for_body = None
|
| 1004 |
+
# The boolean tensor for loop termination condition. Used in code
|
| 1005 |
+
# generation for gradient computation
|
| 1006 |
+
self._pivot = None
|
| 1007 |
+
# The list of exit tensors for loop variables.
|
| 1008 |
+
self._loop_exits = []
|
| 1009 |
+
# The list of enter tensors for loop variables.
|
| 1010 |
+
self._loop_enters = []
|
| 1011 |
+
self._graph = ops.get_default_graph()
|
| 1012 |
+
|
| 1013 |
+
def _init_from_proto(self, context_def, import_scope=None):
|
| 1014 |
+
"""Creates a new `WhileContext` from protocol buffer.
|
| 1015 |
+
|
| 1016 |
+
Args:
|
| 1017 |
+
context_def: `WhileContextDef` protocol buffer.
|
| 1018 |
+
import_scope: Optional `string`. Name scope to add.
|
| 1019 |
+
"""
|
| 1020 |
+
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
|
| 1021 |
+
# Create from context_def.
|
| 1022 |
+
g = ops.get_default_graph()
|
| 1023 |
+
self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
|
| 1024 |
+
if context_def.maximum_iterations_name:
|
| 1025 |
+
self._maximum_iterations = g.as_graph_element(
|
| 1026 |
+
ops.prepend_name_scope(context_def.maximum_iterations_name,
|
| 1027 |
+
import_scope))
|
| 1028 |
+
else:
|
| 1029 |
+
self._maximum_iterations = None
|
| 1030 |
+
self._parallel_iterations = context_def.parallel_iterations
|
| 1031 |
+
self._back_prop = context_def.back_prop
|
| 1032 |
+
self._swap_memory = context_def.swap_memory
|
| 1033 |
+
self._pivot_for_pred = g.as_graph_element(
|
| 1034 |
+
ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope))
|
| 1035 |
+
# We use this node to control constants created by the body lambda.
|
| 1036 |
+
self._pivot_for_body = g.as_graph_element(
|
| 1037 |
+
ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope))
|
| 1038 |
+
# The boolean tensor for loop termination condition. Used in code
|
| 1039 |
+
# generation for gradient computation.
|
| 1040 |
+
self._pivot = g.as_graph_element(
|
| 1041 |
+
ops.prepend_name_scope(context_def.pivot_name, import_scope))
|
| 1042 |
+
# The list of exit tensors for loop variables.
|
| 1043 |
+
self._loop_exits = [
|
| 1044 |
+
g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope))
|
| 1045 |
+
for exit_name in context_def.loop_exit_names
|
| 1046 |
+
]
|
| 1047 |
+
# The list of enter tensors for loop variables.
|
| 1048 |
+
self._loop_enters = [
|
| 1049 |
+
g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope))
|
| 1050 |
+
for enter_name in context_def.loop_enter_names
|
| 1051 |
+
]
|
| 1052 |
+
super(WhileContext, self).__init__(
|
| 1053 |
+
values_def=context_def.values_def, import_scope=import_scope)
|
| 1054 |
+
|
| 1055 |
+
# import_scope causes self.name to be different from the original serialized
|
| 1056 |
+
# context's name. Rewrite "frame_name" attrs with the new name.
|
| 1057 |
+
if import_scope:
|
| 1058 |
+
for tensor_name in self._values:
|
| 1059 |
+
op = g.as_graph_element(tensor_name).op
|
| 1060 |
+
if util.IsLoopEnter(op):
|
| 1061 |
+
# pylint: disable=protected-access
|
| 1062 |
+
op._set_attr("frame_name",
|
| 1063 |
+
attr_value_pb2.AttrValue(s=compat.as_bytes(self.name)))
|
| 1064 |
+
# pylint: enable=protected-access
|
| 1065 |
+
self._graph = ops.get_default_graph()
|
| 1066 |
+
|
| 1067 |
+
@property
|
| 1068 |
+
def maximum_iterations(self):
|
| 1069 |
+
"""The maximum number of iterations that will be executed."""
|
| 1070 |
+
return self._maximum_iterations
|
| 1071 |
+
|
| 1072 |
+
@property
|
| 1073 |
+
def parallel_iterations(self):
|
| 1074 |
+
"""The number of iterations allowed to run in parallel."""
|
| 1075 |
+
return self._parallel_iterations
|
| 1076 |
+
|
| 1077 |
+
@property
|
| 1078 |
+
def back_prop(self):
|
| 1079 |
+
"""True iff backprop is enabled for this while loop."""
|
| 1080 |
+
return self._back_prop
|
| 1081 |
+
|
| 1082 |
+
@property
|
| 1083 |
+
def swap_memory(self):
|
| 1084 |
+
"""True iff GPU-CPU memory swap is enabled for this while loop."""
|
| 1085 |
+
return self._swap_memory
|
| 1086 |
+
|
| 1087 |
+
@property
|
| 1088 |
+
def pivot(self):
|
| 1089 |
+
"""The boolean tensor representing the loop termination condition."""
|
| 1090 |
+
return self._pivot
|
| 1091 |
+
|
| 1092 |
+
@property
|
| 1093 |
+
def loop_enters(self):
|
| 1094 |
+
"""The list of enter tensors for loop variables."""
|
| 1095 |
+
return self._loop_enters
|
| 1096 |
+
|
| 1097 |
+
@property
|
| 1098 |
+
def loop_exits(self):
|
| 1099 |
+
"""The list of exit tensors for loop variables."""
|
| 1100 |
+
return self._loop_exits
|
| 1101 |
+
|
| 1102 |
+
@property
|
| 1103 |
+
def grad_state(self):
|
| 1104 |
+
"""The gradient loop state."""
|
| 1105 |
+
return self._grad_state
|
| 1106 |
+
|
| 1107 |
+
def to_proto(self, export_scope=None):
|
| 1108 |
+
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
|
| 1109 |
+
|
| 1110 |
+
Args:
|
| 1111 |
+
export_scope: Optional `string`. Name scope to remove.
|
| 1112 |
+
|
| 1113 |
+
Returns:
|
| 1114 |
+
A `WhileContextDef` protocol buffer.
|
| 1115 |
+
"""
|
| 1116 |
+
if (export_scope is None or self.name.startswith(export_scope)):
|
| 1117 |
+
context_def = control_flow_pb2.WhileContextDef()
|
| 1118 |
+
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
|
| 1119 |
+
context_def.parallel_iterations = self._parallel_iterations
|
| 1120 |
+
if self._maximum_iterations is not None:
|
| 1121 |
+
context_def.maximum_iterations_name = ops.strip_name_scope(
|
| 1122 |
+
self._maximum_iterations.name, export_scope)
|
| 1123 |
+
context_def.back_prop = self._back_prop
|
| 1124 |
+
context_def.swap_memory = self._swap_memory
|
| 1125 |
+
context_def.pivot_for_pred_name = ops.strip_name_scope(
|
| 1126 |
+
self._pivot_for_pred.name, export_scope)
|
| 1127 |
+
context_def.pivot_for_body_name = ops.strip_name_scope(
|
| 1128 |
+
self._pivot_for_body.name, export_scope)
|
| 1129 |
+
context_def.pivot_name = ops.strip_name_scope(self._pivot.name,
|
| 1130 |
+
export_scope)
|
| 1131 |
+
context_def.loop_exit_names.extend([
|
| 1132 |
+
ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits
|
| 1133 |
+
])
|
| 1134 |
+
context_def.loop_enter_names.extend([
|
| 1135 |
+
ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters
|
| 1136 |
+
])
|
| 1137 |
+
context_def.values_def.MergeFrom(
|
| 1138 |
+
super(WhileContext, self)._to_values_def(export_scope=export_scope))
|
| 1139 |
+
for nested in self._nested_contexts:
|
| 1140 |
+
nested_def = context_def.nested_contexts.add()
|
| 1141 |
+
nested.to_control_flow_context_def(nested_def)
|
| 1142 |
+
|
| 1143 |
+
return context_def
|
| 1144 |
+
else:
|
| 1145 |
+
return None
|
| 1146 |
+
|
| 1147 |
+
def to_control_flow_context_def(self, context_def, export_scope=None):
|
| 1148 |
+
context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))
|
| 1149 |
+
|
| 1150 |
+
@staticmethod
|
| 1151 |
+
def from_proto(context_def, import_scope=None):
|
| 1152 |
+
"""Returns a `WhileContext` object created from `context_def`.
|
| 1153 |
+
|
| 1154 |
+
Args:
|
| 1155 |
+
context_def: A `WhileContextDef` protocol buffer.
|
| 1156 |
+
import_scope: Optional `string`. Name scope to add.
|
| 1157 |
+
|
| 1158 |
+
Returns:
|
| 1159 |
+
A `WhileContext` Python object.
|
| 1160 |
+
"""
|
| 1161 |
+
ret = WhileContext(context_def=context_def, import_scope=import_scope)
|
| 1162 |
+
ret.Enter()
|
| 1163 |
+
for nested_def in context_def.nested_contexts:
|
| 1164 |
+
from_control_flow_context_def(nested_def, import_scope=import_scope)
|
| 1165 |
+
ret.Exit()
|
| 1166 |
+
return ret
|
| 1167 |
+
|
| 1168 |
+
def GetWhileContext(self):
|
| 1169 |
+
return self
|
| 1170 |
+
|
| 1171 |
+
def GetControlPivot(self):
|
| 1172 |
+
if self._pivot_for_body is not None:
|
| 1173 |
+
return self._pivot_for_body
|
| 1174 |
+
return self._pivot_for_pred
|
| 1175 |
+
|
| 1176 |
+
def AddValue(self, val):
  """Add `val` to the current context and its outer context recursively."""
  result = val
  new_value = val.name not in self._values
  # Don't treat ops in this context as new values. Usually all known values
  # are in self._values, except when we're importing a while loop inside this
  # WhileContext. Since there's a cycle in this case, `val` may be part of the
  # imported while loop but not yet processed by this context and added to
  # self._values in _AddOpInternal. We only want to process external input
  # tensors to the while loop here.
  new_value &= val.op._control_flow_context is not self  # pylint: disable=protected-access
  if new_value:
    self._values.add(val.name)

    # If we are in a grad context and val is from its forward context,
    # use GetRealValue(), which adds the logic to save the history of
    # val in forward.
    grad_ctxt = ops.get_default_graph()._get_control_flow_context()
    if grad_ctxt:
      grad_ctxt = grad_ctxt.GetWhileContext()
      if grad_ctxt.grad_state:
        forward_ctxt = util.GetWhileContext(val.op)
        if util.IsLoopExit(val.op):
          # A loop exit belongs to the context outside its loop.
          forward_ctxt = forward_ctxt.outer_context
          if forward_ctxt:
            forward_ctxt = forward_ctxt.GetWhileContext()
        if forward_ctxt == grad_ctxt.grad_state.forward_context:
          real_val = grad_ctxt.grad_state.GetRealValue(val)
          self._external_values[val.name] = real_val
          return real_val

    if self._outer_context is not None:
      result = self._outer_context.AddValue(val)
    # Create an Enter to make `result` known to this loop context.
    with ops.control_dependencies(None):
      enter = _Enter(
          result,
          self._name,
          is_constant=True,
          parallel_iterations=self._parallel_iterations)
      enter.graph.prevent_feeding(enter)
      if self._outer_context:
        self._outer_context.AddInnerOp(enter.op)
    # Fix the control inputs and control flow context of these enter ops.
    self._FixControlInputsAndContext([enter])

    # Add `enter` in this context.
    self._values.add(enter.name)
    # Cache the mapping so later lookups of `val` reuse the same Enter.
    self._external_values[val.name] = enter
    result = enter
  else:
    # `val` was seen before; return its cached external substitute if any.
    actual_val = self._external_values.get(val.name)
    if actual_val is not None:
      result = actual_val
  return result
|
| 1231 |
+
|
| 1232 |
+
def AddOp(self, op: ops.Operation):
  """Add `op` to the current context."""
  # For a reduction op, if op is in a grad context and its input is from
  # its forward context, moving op to the forward context means we would
  # store the tensor after the reduction as opposed to the tensor before
  # reduction, and therefore could significantly reduce memory consumption.
  # For now, we do this only for a few ops.
  #
  # If in XLA context, do not move constant ops to forward pass as pushing to
  # and popping from a stack removes the constant property of an op and breaks
  # XLA compilation, which requires certain inputs to be constant for certain
  # ops.
  if not util.IsInXLAContext(op) and op.type in {"Shape", "Size", "Rank"}:
    grad_ctxt = ops.get_default_graph()._get_control_flow_context()
    if grad_ctxt:
      grad_ctxt = grad_ctxt.GetWhileContext()
      if grad_ctxt.grad_state:
        op_input_forward_ctxt = util.GetWhileContext(op.inputs[0].op)
        if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
          # Re-home `op` into the context of its first input (the forward
          # context) instead of this grad context.
          op_input_ctxt = op.inputs[0].op._get_control_flow_context()
          op._set_control_flow_context(op_input_ctxt)
          op_input_ctxt._AddOpInternal(op)
          return
  self._AddOpInternal(op)
|
| 1256 |
+
|
| 1257 |
+
# pylint: disable=g-doc-args
def _AddOpInternal(self, op: ops.Operation):
  """Add `op` to the current context.

  We move any external control dependencies of the op to the loop pivot, to
  ensure they get executed.
  """
  # This is needed to prevent frame mismatch errors where there are Const
  # nodes inside tf.function in v1 while_loop and inlining is turned on.
  if op.type in ["PartitionedCall", "StatefulPartitionedCall"]:
    op._add_control_input(self.GetControlPivot().op)  # pylint: disable=protected-access
  if not op.inputs:
    # Remove any external control dependency on this op
    control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
    # Add a control edge from the control pivot to this op.
    if not control_inputs:
      # pylint: disable=protected-access
      op._add_control_input(self.GetControlPivot().op)
      # pylint: enable=protected-access
    for x in op.outputs:
      self._values.add(x.name)
  else:
    for index in range(len(op.inputs)):
      x = op.inputs[index]
      real_x = self.AddValue(x)
      if real_x != x:
        op._update_input(index, real_x)  # pylint: disable=protected-access
    # Remove any external control dependency on this op.
    _, external_inputs = self._RemoveExternalControlEdges(op)
    # Add a control dependency to prevent loop invariants from
    # enabling ops that should not be executed.
    self._MaybeAddControlDependency(op)
    for x in op.outputs:
      self._values.add(x.name)
  if external_inputs:
    # Use an identity to pull control inputs as data inputs. Note that we
    # ignore ops which don't have outputs. TODO(apassos): fix that
    with ops.control_dependencies(None):
      self.Enter()
      external_inputs = [
          array_ops.identity(x.outputs[0]).op
          for x in external_inputs
          if x.outputs
      ]
      self.Exit()
    op._add_control_inputs(external_inputs)  # pylint: disable=protected-access
  if self._outer_context or not util.IsLoopExit(op):
    # Inside a loop frame, fetching/feeding this op directly is unsafe.
    op.graph.prevent_fetching(op)
    for x in op.outputs:
      op.graph.prevent_feeding(x)

  if self._outer_context:
    self._outer_context.AddInnerOp(op)
|
| 1310 |
+
|
| 1311 |
+
def _MaybeAddControlDependency(self, op: ops.Operation):
  """Add a control input to the op if it only depends on loop invariants."""

  def _NeedsPivotDependency(candidate):
    """True when `candidate` has no other ordering constraint in the loop."""
    if candidate.control_inputs:
      # Already ordered by an explicit control edge.
      return False
    # pylint: disable=protected-access
    if (candidate.graph._is_function(candidate.type) or
        candidate.type == "SymbolicGradient"):
      return True
    # pylint: enable=protected-access
    # Free only if every data input is a loop-constant Enter.
    return all(util.IsLoopConstantEnter(inp.op) for inp in candidate.inputs)

  if _NeedsPivotDependency(op):
    # pylint: disable=protected-access
    op._add_control_input(self.GetControlPivot().op)
    # pylint: enable=protected-access
|
| 1331 |
+
|
| 1332 |
+
def AddForwardLoopCounter(self, outer_grad_state):
  """Adds a loop that counts the number of iterations.

  This is added to the forward loop at the time when we start to
  create the loop for backprop gradient computation. Called in
  the outer context of this forward context.

  The pseudocode is:
    `n = 0; while (_pivot) { n++; }`

  Note that a control dependency is added to `n` to ensure the correct
  execution order of stack push ops.

  Args:
    outer_grad_state: The outer grad state. None if not nested.

  Returns:
    The number of iterations taken by the forward loop and the loop index.
  """
  n = constant_op.constant(0, name="f_count")
  if outer_grad_state is not None:
    # Force the stack pushes of i-th execution of an inner loop to be ordered
    # before the pushes of (i+1)-th execution of the same inner loop.
    outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
    n.op._add_control_input(outer_add_op)  # pylint: disable=protected-access

  self.Enter()
  self.AddName(n.name)
  enter_n = _Enter(
      n,
      self._name,
      is_constant=False,
      parallel_iterations=self._parallel_iterations,
      name="f_count")
  self.loop_enters.append(enter_n)

  # Standard while-loop skeleton: Enter -> Merge -> Switch on the pivot.
  merge_n = merge([enter_n, enter_n])[0]
  switch_n = switch(merge_n, self._pivot)

  index = math_ops.add(switch_n[1], 1)
  next_n = _NextIteration(index)
  # Patch the second Merge input to close the back edge of the loop.
  merge_n.op._update_input(1, next_n)  # pylint: disable=protected-access

  total_iterations = exit(switch_n[0], name="f_count")
  self.loop_exits.append(total_iterations)
  self.ExitResult([total_iterations])
  self.Exit()
  return total_iterations, next_n
|
| 1380 |
+
|
| 1381 |
+
def AddBackpropLoopCounter(self, count, outer_grad_state):
  """Add the backprop loop that controls the iterations.

  This is added to the backprop loop. It is used to control the loop
  termination of the backprop loop. Called in the outer context of
  this grad context.

  The pseudocode is:
    `n = count; while (n >= 1) { n--; }`

  Note that a control dependency is added to `final_zero` to ensure the
  correct execution order of stack pop ops.

  Args:
    count: The number of iterations for backprop.
    outer_grad_state: The outer grad state. None if not nested.

  Returns:
    The loop index.
  """
  in_separate_functions = count.graph is not ops.get_default_graph()
  if in_separate_functions:
    # Brings the count into this graph
    count = array_ops.identity(count)
  else:
    # TODO(apassos) XLA expects this constant to be created outside the loop,
    # so doing that for now.
    one = constant_op.constant(1, name="b_count")

  self.Enter()
  self.AddName(count.name)
  enter_count = _Enter(
      count,
      self._name,
      is_constant=False,
      parallel_iterations=self._parallel_iterations,
      name="b_count")
  self.loop_enters.append(enter_count)

  merge_count = merge([enter_count, enter_count])[0]
  self._pivot_for_pred = merge_count

  if in_separate_functions:
    # In this branch `one` was not created above; create it inside the loop.
    one = constant_op.constant(1, name="b_count")
  pred = math_ops.greater_equal(merge_count, one)
  self._pivot = loop_cond(pred, name="b_count")
  switch_count = switch(merge_count, self._pivot)

  index = math_ops.subtract(switch_count[1], one)
  self._pivot_for_body = index
  next_count = _NextIteration(index)
  # Close the back edge: second Merge input is the decremented count.
  merge_count.op._update_input(1, next_count)  # pylint: disable=protected-access

  final_zero = exit(switch_count[0], name="b_count")
  self.loop_exits.append(final_zero)
  if outer_grad_state is not None:
    # Force the stack pops of i-th execution of an inner loop to be ordered
    # before the pops of (i+1)-th execution of the same inner loop.
    # pylint: disable=protected-access
    outer_grad_state.grad_sync._add_control_input(final_zero.op)
    # pylint: enable=protected-access

  self.ExitResult([final_zero])
  self.Exit()
  return next_count
|
| 1446 |
+
|
| 1447 |
+
def AddBackpropAccumulator(self, op: ops.Operation, grad):
  """Add an accumulation loop for every loop invariant.

  This is added to the backprop loop. It is used to accumulate partial
  gradients within each loop iteration. Called when in the gradient while
  context.

  The pseudocode is:
    ```
    acc = 0.0;
    while (_pivot) {
      acc += grad;
    }
    ```

  Args:
    op: The Enter op for a loop invariant.
    grad: The partial gradient of an iteration for a loop invariant.

  Returns:
    The gradient for a loop invariant.
  """
  self.Exit()
  # Create a zeros tensor with the right shape for acc. If we don't
  # know the full shape statically, we will have to get the shape
  # dynamically from the forward inference. Getting the shape right
  # for the zeros is only needed for the base case when the loop exits
  # without running any iterations.
  shape = grad.get_shape()
  if shape.is_fully_defined():
    if self.outer_context:
      self.outer_context.Enter()
    acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
    if self.outer_context:
      self.outer_context.Exit()
  else:
    value = op.inputs[0]
    if (isinstance(self.outer_context, WhileContext) and
        self.outer_context.grad_state is not None):
      # We are in a nested while loop.
      forward_ctxt = self.grad_state.forward_context
      forward_ctxt.outer_context.Enter()
      zeros_shape = array_ops.shape_internal(value, optimize=False)
      forward_ctxt.outer_context.Exit()
      outer_grad_state = self.grad_state.outer_grad_state
      # Record the dynamic shape in the forward pass so the backprop pass
      # can replay it for the zeros initializer.
      history_zeros_shape = outer_grad_state.AddForwardAccumulator(
          zeros_shape)
      self.outer_context.Enter()
      real_shape = outer_grad_state.AddBackpropAccumulatedValue(
          history_zeros_shape, zeros_shape)
      acc = array_ops.zeros(real_shape, grad.dtype)
      self.outer_context.Exit()
    else:
      if self.outer_context:
        self.outer_context.Enter()
      zeros_shape = array_ops.shape_internal(value, optimize=False)
      acc = array_ops.zeros(zeros_shape, grad.dtype)
      if self.outer_context:
        self.outer_context.Exit()

  self.Enter()
  self.AddName(acc.name)
  enter_acc = _Enter(
      acc,
      self._name,
      is_constant=False,
      parallel_iterations=self._parallel_iterations,
      name="b_acc")
  self.loop_enters.append(enter_acc)

  merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
  switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)

  add_acc = math_ops.add(switch_acc_true, grad)
  next_acc = _NextIteration(add_acc)
  merge_acc.op._update_input(1, next_acc)  # pylint: disable=protected-access

  result_acc = exit(switch_acc_false, name="b_acc")
  self.loop_exits.append(result_acc)
  self.ExitResult([result_acc])
  return result_acc
|
| 1528 |
+
|
| 1529 |
+
def AddBackpropIndexedSlicesAccumulator(self, op: ops.Operation, grad):
  """This is used for accumulating gradients that are IndexedSlices.

  This is essentially the equivalent of AddBackpropAccumulator but optimized
  for things like updating embeddings from within a while loop.

  Args:
    op: The Enter op for a loop invariant.
    grad: The partial gradients represented as an IndexedSlices.

  Returns:
    The accumulated IndexedSlices gradient of the loop invariant.
  """
  values = grad.values
  indices = grad.indices
  dense_shape = grad.dense_shape

  self.Exit()
  if self.outer_context:
    self.outer_context.Enter()
  if values.get_shape().is_fully_defined():
    values_shape = tensor_shape.TensorShape([tensor_shape.Dimension(1)] +
                                            values.get_shape().dims[1:])
    if self.outer_context:
      self.outer_context.Enter()
    values_acc = constant_op.constant(
        0, values.dtype, shape=values_shape, name="b_acc")
    if self.outer_context:
      self.outer_context.Exit()
  else:
    # Dynamic shape: derive the accumulator shape from the loop invariant.
    values_shape = _resource_safe_shape(op.inputs[0])[1:]
    values_shape = array_ops.concat([[1], values_shape], 0)
    values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
  indices_acc = constant_op.constant([0], indices.dtype)
  shape_acc = None
  if dense_shape is not None:
    if dense_shape.get_shape().is_fully_defined():
      if self.outer_context:
        self.outer_context.Enter()
      shape_acc = constant_op.constant(
          0, dense_shape.dtype, shape=dense_shape.get_shape())
      if self.outer_context:
        self.outer_context.Exit()
    else:
      shape_acc = array_ops.zeros_like(
          array_ops.shape_internal(
              op.inputs[0], optimize=False, out_type=dense_shape.dtype),
          optimize=False)

  if self.outer_context:
    self.outer_context.Exit()

  self.Enter()
  self.AddName(values_acc.name)
  self.AddName(indices_acc.name)
  init_acc = [indices_acc, values_acc]
  if shape_acc is not None:
    self.AddName(shape_acc.name)
    init_acc.append(shape_acc)

  # Set use_input_shape=False since the accumulator tensors will grow in
  # size. If use_input_shape=True, the _update_input call below will result in
  # incompatible shapes.
  enter_acc = [
      _Enter(
          x,
          self._name,
          is_constant=False,
          parallel_iterations=self._parallel_iterations,
          use_input_shape=False,
          name="b_acc") for x in init_acc
  ]
  # Manually set appropriate partial shapes.
  enter_acc[0].set_shape([None])
  if values_acc.shape.dims is not None:
    enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:])
  self.loop_enters.extend(enter_acc)

  merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
  switch_acc = [switch(x, self._pivot) for x in merge_acc]

  # The actual accumulation.
  acc_indexed_slices = [
      array_ops.concat([xa[1], xv], 0)
      for xa, xv in zip(switch_acc[:2], [indices, values])
  ]
  if shape_acc is not None:
    # For the shape we just keep the maximum
    acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1]))

  next_acc = [_NextIteration(x) for x in acc_indexed_slices]
  for xm, xn in zip(merge_acc, next_acc):
    xm.op._update_input(1, xn)  # pylint: disable=protected-access

  exit_acc = [exit(x[0], name="b_acc") for x in switch_acc]
  self.loop_exits.extend(exit_acc)

  self.ExitResult(exit_acc)
  return indexed_slices.IndexedSlices(
      indices=exit_acc[0],
      values=exit_acc[1],
      dense_shape=exit_acc[2] if shape_acc is not None else None)
|
| 1631 |
+
|
| 1632 |
+
def _InitializeValues(self, values):
|
| 1633 |
+
"""Makes the values known to this context."""
|
| 1634 |
+
self._values = set()
|
| 1635 |
+
for x in values:
|
| 1636 |
+
if isinstance(x, tensor_lib.Tensor):
|
| 1637 |
+
self._values.add(x.name)
|
| 1638 |
+
else:
|
| 1639 |
+
raise TypeError("'values' must be a list of Tensors. "
|
| 1640 |
+
f"Received: {type(x)}.")
|
| 1641 |
+
|
| 1642 |
+
def _BuildLoop(self, pred, body, flat_orig_loop_vars, flat_loop_vars,
               loop_vars_signature):
  """Core: Add the loop termination condition and body to the graph."""
  flat_shape_invariants = nest.map_structure(
      lambda spec: spec.shape,
      nest.flatten(loop_vars_signature, expand_composites=True))

  # Let the context know the loop variables so the loop variables
  # would be added in the outer contexts properly.
  self._InitializeValues(flat_loop_vars)
  if self._outer_context:
    real_vars = [self._outer_context.AddValue(x) for x in flat_loop_vars]
  else:
    real_vars = flat_loop_vars

  enter_vars = []
  with ops.control_dependencies(None):
    for real_var, shape_invariant in zip(real_vars, flat_shape_invariants):
      enter_var = _Enter(
          real_var,
          self._name,
          is_constant=False,
          parallel_iterations=self._parallel_iterations,
          use_input_shape=False)

      if _ShapeLessThanOrEqual(real_var.get_shape(), shape_invariant):
        enter_var.set_shape(shape_invariant)
      else:
        raise ValueError(
            f"The shape invariant specified for {real_var.name} is not "
            "compatible with the initial shape of the loop variable. It "
            f"enters the loop with shape {real_var.get_shape()}, but the "
            f"specified shape invariant is {shape_invariant}.")

      enter_var.graph.prevent_feeding(enter_var)
      if self._outer_context:
        self._outer_context.AddInnerOp(enter_var.op)
      enter_vars.append(enter_var)

  # Finds the closest enclosing non-None control pivot.
  outer_context = self._outer_context
  control_pivot = None
  while outer_context is not None and control_pivot is None:
    control_pivot = outer_context.GetControlPivot()
    # pylint: disable=protected-access
    outer_context = outer_context._outer_context
    # pylint: enable=protected-access

  if control_pivot is not None:
    for var in enter_vars:
      if util.IsLoopConstantEnter(var.op.inputs[0].op):
        # pylint: disable=protected-access
        var.op._add_control_input(control_pivot.op)
        # pylint: enable=protected-access

  # Fix the control inputs and control flow context of these enter ops.
  self._FixControlInputsAndContext(enter_vars)
  self._InitializeValues(enter_vars)
  self._loop_enters = enter_vars

  merge_vars = [merge([x, x])[0] for x in enter_vars]
  self._pivot_for_pred = merge_vars[0]

  merge_vars_with_tensorarrays = nest.map_structure(
      _convert_flow_to_tensorarray, flat_orig_loop_vars, merge_vars)
  # Build the graph for pred.
  packed_vars = nest.pack_sequence_as(
      structure=loop_vars_signature,
      flat_sequence=merge_vars_with_tensorarrays,
      expand_composites=True)
  c = ops.convert_to_tensor(pred(*packed_vars))
  self._pivot = loop_cond(c, name="LoopCond")
  switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]

  # Build the graph for body.
  vars_for_body = [_Identity(x[1]) for x in switch_vars]
  self._pivot_for_body = vars_for_body[0]
  # Convert TensorArray flow variables inside the context back into
  # their associated TensorArrays for calling the body.
  vars_for_body_with_tensorarrays = nest.map_structure(
      _convert_flow_to_tensorarray, flat_orig_loop_vars, vars_for_body)
  packed_vars_for_body = nest.pack_sequence_as(
      structure=loop_vars_signature,
      flat_sequence=vars_for_body_with_tensorarrays,
      expand_composites=True)
  # Snapshot the summary collection so summaries created by `body` can be
  # detected and rewired below.
  pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
  body_result = body(*packed_vars_for_body)
  post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
  if not nest.is_nested(body_result):
    body_result = [body_result]
  if len(post_summaries) > len(pre_summaries):
    new_summaries = post_summaries[len(pre_summaries):]
    summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
    summary_ref[:] = pre_summaries
    with ops.control_dependencies(new_summaries):

      def map_fn(x):
        # TODO(apassos) figure out how to trigger with tensor arrays as well
        if isinstance(x, tensor_array_ops.TensorArray):
          return x
        return array_ops.identity(x)

      body_result = nest.map_structure(
          map_fn, body_result, expand_composites=True)

  body_result = variable_utils.convert_variables_to_tensors(body_result)
  # Compare the structure types of input and output of body.
  # For backwards compatibility, the first layer is forced to a list
  # during this comparison, because inputs are typically lists and
  # outputs of the body are typically tuples.
  nest.assert_same_structure(
      list(packed_vars_for_body), list(body_result), expand_composites=True)

  # Store body_result to keep track of TensorArrays returned by body
  original_body_result = body_result
  # Convert TensorArrays returned by body into their flow variables
  result = nest.map_structure(
      _convert_tensorarray_to_flow,
      nest.flatten(body_result, expand_composites=True),
      expand_composites=True)
  result = ops.convert_n_to_tensor_or_composite(result)

  # Add NextIteration and the back edges to complete the loop.
  if len(merge_vars) != len(result):
    raise ValueError("Number of inputs and outputs of 'body' must match "
                     f"'loop_vars'. Got {len(merge_vars)} for the number of "
                     f"inputs/outputs, and {len(result)} for 'loop_vars'.")
  next_vars = []
  for m, v in zip(merge_vars, result):
    next_vars.append(_AddNextAndBackEdge(m, v))

  # Add the exit ops.
  exit_vars = [exit(x[0]) for x in switch_vars]
  self._loop_exits = exit_vars

  # Exit the loop.
  self.ExitResult(exit_vars)

  return original_body_result, exit_vars
|
| 1781 |
+
|
| 1782 |
+
def BuildLoop(self, pred, body, loop_vars, shape_invariants,
              return_same_structure):
  """Add the loop termination condition and body to the graph."""

  # Keep flat_orig_loop_vars to identify which are TensorArrays
  flat_orig_loop_vars = nest.flatten(loop_vars, expand_composites=True)

  loop_vars = nest.map_structure(
      _convert_to_tensor_or_composite_or_tensorarray, loop_vars)
  # Convert TensorArrays to their flow variables
  flat_loop_vars = nest.map_structure(
      _convert_tensorarray_to_flow,
      nest.flatten(loop_vars, expand_composites=True))

  if shape_invariants is not None:
    loop_vars_signature = nest.map_structure(
        _shape_invariant_to_type_spec, loop_vars, shape_invariants)
  else:
    # No explicit invariants: derive the signature from the loop vars alone.
    loop_vars_signature = nest.map_structure(
        _shape_invariant_to_type_spec, loop_vars)

  try:
    self.Enter()
    # _BuildLoop calls _update_input in several places. _mutation_lock()
    # ensures a Session.run call cannot occur between creating and mutating
    # new ops.
    with ops.get_default_graph()._mutation_lock():  # pylint: disable=protected-access
      original_body_result, exit_vars = self._BuildLoop(
          pred, body, flat_orig_loop_vars, flat_loop_vars,
          loop_vars_signature)
  finally:
    self.Exit()

  flat_result = nest.flatten(original_body_result, expand_composites=True)
  # Convert TensorArray flow variables outside the context back into
  # their associated TensorArrays for returning to caller.
  exit_vars_with_tensorarrays = nest.map_structure(
      _convert_flow_to_tensorarray, flat_result, exit_vars)

  packed_exit_vars = nest.pack_sequence_as(
      structure=original_body_result,
      flat_sequence=exit_vars_with_tensorarrays,
      expand_composites=True)

  if return_same_structure:
    return packed_exit_vars
  else:
    # Legacy behavior: unwrap a single loop variable from its list.
    return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars
|
| 1830 |
+
|
| 1831 |
+
def _FixControlInputsAndContext(self, enters):
  """Moves `enters` into this context and rewires their control inputs.

  Only control inputs that live in an ancestor ControlFlowContext (within
  the outer WhileContext) are kept; the rest are dropped.

  Args:
    enters: A list of Enter Tensors created for this context.

  Raises:
    TypeError: If any element of `enters` is not a Tensor.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  for e in enters:
    if isinstance(e, tensor_lib.Tensor):
      xs = [e]
    else:
      raise TypeError("'enters' must be a list of Tensors. "
                      f"Received: {type(e)}.")
    for x in xs:
      inp_op = x.op.inputs[0].op
      control_inputs = graph._control_dependencies_for_inputs([inp_op])
      outer_control_inputs = []
      for op in control_inputs:
        # We need to keep control inputs that are in any ancestor
        # ControlFlowContext, and within outer WhileContext.
        keep_as_control_input = True
        op_ctxt = util.GetOutputContext(op)
        outer_ctxt = self.outer_context
        outer_while_context = (None if outer_ctxt is None else
                               outer_ctxt.GetWhileContext())
        # Walk up the context chain looking for `op`'s context.
        while outer_ctxt != op_ctxt:
          if outer_ctxt is None or outer_ctxt == outer_while_context:
            keep_as_control_input = False
            break
          outer_ctxt = outer_ctxt.outer_context
        if keep_as_control_input:
          outer_control_inputs.append(op)
      x.op._set_control_flow_context(self)
      x.op._add_control_inputs(outer_control_inputs)
      graph._record_op_seen_by_control_dependencies(x.op)
  # pylint: enable=protected-access
|
| 1863 |
+
|
| 1864 |
+
  def IsWhileContext(self):
    """Returns True: this context is a while-loop context."""
    return True
|
| 1866 |
+
|
| 1867 |
+
|
| 1868 |
+
# pylint: enable=redefined-outer-name
|
| 1869 |
+
|
| 1870 |
+
|
| 1871 |
+
def _AsTensorList(x, p):
  """Return x as a list of Tensors or IndexedSlices.

  For entries of `x` that are Operations, this returns an Identity of `p`
  with a dependency on the operation.

  Args:
    x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
    p: A Tensor to return for entries in `x` that are Operations.

  Returns:
    A list of Tensors or IndexedSlices.
  """
  items = x if isinstance(x, (list, _basetuple)) else [x]

  results = []
  for item in items:
    if isinstance(item, ops.Operation):
      # A bare Operation carries no value; stand in `p` gated on the op.
      item = with_dependencies([item], p)
    item = ops.convert_to_tensor_or_composite(item)
    if isinstance(item, tensor_lib.Tensor):
      results.append(array_ops.identity(item))
    else:
      results.append(
          indexed_slices.IndexedSlices(
              array_ops.identity(item.values),
              array_ops.identity(item.indices)))
  return results
|
| 1899 |
+
|
| 1900 |
+
|
| 1901 |
+
def _CheckResults(a, b):
  """Assert that `a` and `b` are equal-length lists with matching dtypes."""
  assert len(a) == len(b), (
      "Values returned by a() and b() must have the same length.")
  for x, y in zip(a, b):
    assert x.dtype == y.dtype, (
        f"Values returned by a() [{x.name}] and b() [{y.name}] must have "
        f"the same type: {x.dtype.name}, {y.dtype.name}.")
|
| 1908 |
+
|
| 1909 |
+
|
| 1910 |
+
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tf.tuple` and `tf.group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  if context.executing_eagerly():
    # Eager execution runs ops immediately, so ordering needs no gating.
    return output_tensor
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_composite(output_tensor)
        if isinstance(output_tensor, indexed_slices.IndexedSlices):
          # Preserve the IndexedSlices structure; only the values are gated.
          return indexed_slices.IndexedSlices(
              _Identity(output_tensor.values, name=name), output_tensor.indices,
              output_tensor.dense_shape)
        else:
          return _Identity(output_tensor, name=name)
|
| 1946 |
+
|
| 1947 |
+
|
| 1948 |
+
def _GroupControlDeps(dev, deps, name=None):
  """Create a NoOp gated on `deps`, optionally pinned to device `dev`."""
  with ops.control_dependencies(deps):
    if dev is None:
      return no_op(name=name)
    with ops.device(dev):
      return no_op(name=name)
|
| 1955 |
+
|
| 1956 |
+
|
| 1957 |
+
# TODO(touts): Accept "inputs" as a list.
|
| 1958 |
+
@tf_export("group")
def group(*inputs, **kwargs):
  """Create an op that groups multiple operations.

  When this op finishes, all ops in `inputs` have finished. This op has no
  output.

  Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
  this method, as ops execute in the expected order thanks to automatic control
  dependencies.* Only use `tf.group` when working with v1
  `tf.Graph` code.

  When operating in a v1-style graph context, ops are not executed in the same
  order as specified in the code; TensorFlow will attempt to execute ops in
  parallel or in an order convenient to the result it is computing. `tf.group`
  allows you to request that one or more results finish before execution
  continues.

  `tf.group` creates a single op (of type `NoOp`), and then adds appropriate
  control dependencies. Thus, `c = tf.group(a, b)` will compute the same graph
  as this:

      with tf.control_dependencies([a, b]):
          c = tf.no_op()

  See also `tf.tuple` and
  `tf.control_dependencies`.

  Args:
    *inputs: Zero or more tensors to group.
    name: A name for this operation (optional).

  Returns:
    An Operation that executes all its inputs.

  Raises:
    ValueError: If an unknown keyword argument is provided.
  """
  if context.executing_eagerly():
    return None
  name = kwargs.pop("name", None)
  if kwargs:
    raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
  with ops.name_scope(name, "group_deps", inputs) as name:
    # Grouping no inputs means do nothing.
    if not inputs:
      return no_op(name=name)

    # Bucket the (flattened) inputs by the device they live on.
    ops_on_device = {}
    for inp in nest.flatten(inputs, expand_composites=True):
      if not hasattr(inp, "device"):
        raise TypeError("'inputs' should be zero or more (nested) Tensors. "
                        f"Received '{inp}' with type '{type(inp)}'.")
      ops_on_device.setdefault(inp.device, []).append(inp)

    if len(ops_on_device) == 1:
      # 1-level tree. The root node is the returned NoOp node.
      (dev, deps), = ops_on_device.items()
      return _GroupControlDeps(dev, deps, name=name)

    # 2-level tree. The root node is the returned NoOp node.
    # deps contains 1 NoOp node for each device.
    def device_key(dev):
      """A sort key that allows None to be compared to strings."""
      return "" if dev is None else dev

    deps = [
        _GroupControlDeps(dev, ops_on_device[dev])
        for dev in sorted(ops_on_device, key=device_key)
    ]
    with ops.control_dependencies(deps):
      return no_op(name=name)
|
| 2035 |
+
|
| 2036 |
+
|
| 2037 |
+
@tf_export("tuple", v1=[])
@dispatch.add_dispatch_support
def tuple_v2(tensors, control_inputs=None, name=None):
  """Groups tensors together.

  The returned tensors have the same value as the input tensors, but they
  are computed only after all the input tensors have been computed.

  Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
  this method, as ops execute in the expected order thanks to automatic control
  dependencies.* Only use `tf.tuple` when working with v1 `tf.Graph` code.

  See also `tf.group` and `tf.control_dependencies`.

  Example:
  >>> with tf.Graph().as_default():
  ...   with tf.compat.v1.Session() as sess:
  ...     v = tf.Variable(0.0)
  ...     a = tf.constant(1.0)
  ...     sess.run(tf.compat.v1.global_variables_initializer())
  ...     for i in range(5):
  ...       update_op = v.assign_add(1.0)
  ...       b = a + v
  ...       res_b = sess.run(b)
  ...       res_v = sess.run(v)
  ...       print(res_v)
  0.0
  0.0
  0.0
  0.0
  0.0

  >>> with tf.Graph().as_default():
  ...   with tf.compat.v1.Session() as sess:
  ...     v = tf.Variable(0.0)
  ...     a = tf.constant(1.0)
  ...     sess.run(tf.compat.v1.global_variables_initializer())
  ...     for i in range(5):
  ...       update_op = v.assign_add(1.0)
  ...       calc = [a + v]
  ...       # `tf.tuple` ensures `update_op` is run before `b`
  ...       b = tf.tuple(calc, [tf.group(update_op)])
  ...       res_b = sess.run(b)
  ...       res_v = sess.run(v)
  ...       print(res_v)
  1.0
  2.0
  3.0
  4.0
  5.0


  Args:
    tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
    control_inputs: List of additional ops to finish before returning.
    name: (optional) A name to use as a `name_scope` for the operation.

  Returns:
    Same as `tensors`.

  Raises:
    ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
    TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
      objects.

  """
  # Thin v2 wrapper: delegates to the v1 implementation below, which shadows
  # the `tuple` builtin by design.
  return tuple(tensors=tensors, name=name, control_inputs=control_inputs)  # pylint: disable=redefined-builtin
|
| 2104 |
+
|
| 2105 |
+
|
| 2106 |
+
@tf_export(v1=["tuple"])
@dispatch.add_dispatch_support
def tuple(tensors, name=None, control_inputs=None):  # pylint: disable=redefined-builtin
  """Group tensors together.

  This creates a tuple of tensors with the same values as the `tensors`
  argument, except that the value of each tensor is only returned after the
  values of all tensors have been computed.

  `control_inputs` contains additional ops that have to finish before this op
  finishes, but whose outputs are not returned.

  This can be used as a "join" mechanism for parallel computations: all the
  argument tensors can be computed in parallel, but the values of any tensor
  returned by `tuple` are only available after all the parallel computations
  are done.

  See also `tf.group` and
  `tf.control_dependencies`.

  Args:
    tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
    name: (optional) A name to use as a `name_scope` for the operation.
    control_inputs: List of additional ops to finish before returning.

  Returns:
    Same as `tensors`.

  Raises:
    ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
    TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
      objects.

  """
  if context.executing_eagerly():
    # Eager execution already runs everything in order; nothing to gate.
    return tensors
  with ops.name_scope(name, "tuple", tensors) as name:
    # Convert plain Python values to tensors; pass through Operations,
    # existing TF types, and None placeholders unchanged.
    tensors = [
        t if (isinstance(t, ops.Operation) or tensor_util.is_tf_type(t) or
              t is None) else ops.convert_to_tensor(t) for t in tensors
    ]
    # The producing ops of all non-None entries form the gate.
    gating_ops = [
        t if isinstance(t, ops.Operation) else t.op
        for t in tensors
        if t is not None
    ]
    if control_inputs:
      for c in control_inputs:
        if isinstance(c, tensor_lib.Tensor):
          c = c.op
        elif not isinstance(c, ops.Operation):
          raise TypeError(
              "'control_inputs' must only contain Operation or Tensor. "
              f"Received: {type(c)}")
        gating_ops.append(c)
    # Note that in order to ensure ordering in the pbtxt, we must take care to
    # ensure the order here.
    gating_ops = sorted(set(gating_ops), key=lambda op: op._id)  # Uniquify ops.
    if not gating_ops:
      raise ValueError("'tensors' must have at least one Tensor. "
                       f"Received: {tensors}.")
    gate = group(*gating_ops)
    tpl = []
    for t in tensors:
      if tensor_util.is_tf_type(t):
        # Gate each tensor's value on the joint gate op.
        tpl.append(with_dependencies([gate], t))
      elif isinstance(t, ops.Operation):
        with ops.control_dependencies([gate]):
          tpl.append(group(t))
      else:
        # Preserve None placeholders positionally.
        tpl.append(None)
    return tpl
|
| 2178 |
+
|
| 2179 |
+
|
| 2180 |
+
class XLAControlFlowContext(ControlFlowContext):
  """Base class for XLA and TPU control flow contexts."""

  def __init__(self):
    super().__init__()
    self._name = "XLAControlFlowContext"

  def to_control_flow_context_def(self, context_def, export_scope=None):
    # pylint: disable=useless-super-delegation
    # The override exists because `ControlFlowContext` requires the method.
    super().to_control_flow_context_def(context_def, export_scope)

  def IsXLAContext(self):
    return True

  def AddOp(self, _):
    # XLA contexts do not track individual ops.
    pass

  def AddValue(self, x):
    # Values pass through unchanged.
    return x

  def RequiresUniqueFunctionRetracing(self):
    """Returns whether the tf.function should be retraced if the context changes.
    """
    return False
|
| 2206 |
+
|
| 2207 |
+
|
| 2208 |
+
@tf_export("__internal__.get_enclosing_xla_context", v1=[])
def get_enclosing_xla_context():
  """Recursively find and return the XLAControlFlowContext."""
  graph = ops.get_default_graph()
  while graph is not None:
    # pylint: disable=protected-access
    ctxt = graph._get_control_flow_context()
    # pylint: enable=protected-access
    # Walk the context chain of this graph looking for an XLA context.
    while ctxt is not None:
      if isinstance(ctxt, XLAControlFlowContext):
        return ctxt
      ctxt = ctxt.outer_context
    # This may be a FuncGraph due to defuns or v2 control flow; continue the
    # search in the original graph that holds the XLAControlFlowContext.
    graph = getattr(graph, "outer_graph", None)
  return None
|
| 2224 |
+
|
| 2225 |
+
|
| 2226 |
+
def from_control_flow_context_def(context_def, import_scope=None):
  """Deserializes `context_def` into the appropriate ControlFlowContext.

  Args:
    context_def: ControlFlowContextDef proto
    import_scope: Optional `string`. Name scope to add.

  Returns:
    A ControlFlowContext subclass
  """
  # Dispatch on whichever oneof field is populated in the proto.
  if context_def.HasField("cond_ctxt"):
    return CondContext.from_proto(
        context_def.cond_ctxt, import_scope=import_scope)
  elif context_def.HasField("while_ctxt"):
    return WhileContext.from_proto(
        context_def.while_ctxt, import_scope=import_scope)
  raise NotImplementedError("Unknown ControlFlowContextDef field: %s" %
                            context_def.WhichOneof("ctxt"))
|
| 2244 |
+
|
| 2245 |
+
|
| 2246 |
+
# Register (de)serialization hooks so cond/while contexts survive
# MetaGraph export and import.
ops.register_proto_function(
    ops.GraphKeys.COND_CONTEXT,
    proto_type=control_flow_pb2.CondContextDef,
    to_proto=CondContext.to_proto,
    from_proto=CondContext.from_proto)

ops.register_proto_function(
    ops.GraphKeys.WHILE_CONTEXT,
    proto_type=control_flow_pb2.WhileContextDef,
    to_proto=WhileContext.to_proto,
    from_proto=WhileContext.from_proto)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_switch_case.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Switch case for Control Flow Operations."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.eager import context
|
| 18 |
+
from tensorflow.python.framework import ops
|
| 19 |
+
from tensorflow.python.framework import tensor
|
| 20 |
+
from tensorflow.python.ops import array_ops
|
| 21 |
+
from tensorflow.python.ops import cond_v2
|
| 22 |
+
from tensorflow.python.ops import control_flow_util as util
|
| 23 |
+
from tensorflow.python.ops import gen_functional_ops
|
| 24 |
+
from tensorflow.python.ops import math_ops
|
| 25 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _indexed_case_verify_and_canonicalize_args(branch_fns, default,
                                               branch_index):
  """Verifies input arguments for the case function.

  Args:
    branch_fns: Dict or list of pairs of an `int` and a callable which returns a
      list of tensors.
    default: Optional callable that returns a list of tensors.
    branch_index: Optional int `Tensor`, which selects for the corresponding
      pred_fn_pair.

  Raises:
    TypeError: If `branch_fns` is not a list/dictionary.
    TypeError: If `branch_fns` is a list but does not contain 2-tuples or
      callables.
    TypeError: If `fns[i]` is not callable for any i, or `default` is not
      callable.

  Returns:
    branch_fns: validated list of callables for each branch (default last).
  """
  if not isinstance(branch_index, tensor.Tensor):
    raise TypeError("'branch_index' must be a Tensor, got {}".format(
        type(branch_index)))
  if not branch_index.dtype.is_integer:
    raise TypeError("'branch_index' must be an integer Tensor, got {}".format(
        branch_index.dtype))

  if not branch_fns:
    raise ValueError("Must provide at least one item in 'branch_fns'")
  if not isinstance(branch_fns, (list, tuple, dict)):
    raise TypeError("'branch_fns' must be a list, tuple, or dict")

  if isinstance(branch_fns, dict):
    branch_fns = branch_fns.items()

  # A bare sequence of callables implicitly uses the position as the key.
  if all(callable(fn) for fn in branch_fns):
    branch_fns = list(enumerate(branch_fns))

  for pair in branch_fns:
    if not isinstance(pair, tuple) or len(pair) != 2:
      raise TypeError("Each entry in 'branch_fns' must be a 2-tuple. "
                      f"Received {pair}.")
    key, fn = pair
    if not isinstance(key, int):
      raise TypeError("key must be a Python `int`, got {}".format(type(key)))
    if not callable(fn):
      raise TypeError("fn for key {} must be callable.".format(key))

  # Keys must form the contiguous range [0, len) with no duplicates.
  keys = [k for k, _ in branch_fns]
  if min(keys) < 0 or max(keys) >= len(keys) or len(set(keys)) != len(keys):
    raise ValueError(
        "branch indices (keys) must form contiguous range of [0 to {}) but "
        "found {{{}}}".format(len(keys), ",".join(map(str, sorted(keys)))))
  actions = [fn for _, fn in sorted(branch_fns)]
  if default is not None:
    actions.append(default)
  return actions
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _indexed_case_helper(branch_fns,
                         default,
                         branch_index,
                         name,
                         lower_using_switch_merge=None):
  """Implementation of case that emits the n-way indexed Case op.

  Args:
    branch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable
      which returns a list of tensors.
    default: Optional callable that returns a list of tensors.
    branch_index: Optional int `Tensor`, which selects for the corresponding
      pred_fn_pair.
    name: A name for this operation (optional).
    lower_using_switch_merge: Lower this op using switch merge ops (optional).

  Returns:
    The tensors returned by the pair whose key matched branch_index, or
    those returned by `default` if none does.

  Raises:
    TypeError: If `branch_fns` is not a list/dictionary.
    TypeError: If `branch_fns` is a list but does not contain 2-tuples or
      callables.
    TypeError: If `fns[i]` is not callable for any i, or `default` is not
      callable.
  """
  branch_fns = _indexed_case_verify_and_canonicalize_args(
      branch_fns, default, branch_index)
  with ops.name_scope(name, "case", [branch_index]):
    if context.executing_eagerly() and not hasattr(branch_index, "graph"):
      # Eager fast path: clamp out-of-range indices to the last branch
      # (which is `default` when one was supplied) and call it directly.
      branch_index = array_ops.where(
          math_ops.less(branch_index, 0)
          | math_ops.greater_equal(branch_index, len(branch_fns)),
          len(branch_fns) - 1, branch_index)
      return branch_fns[int(branch_index)]()
    # Graph mode: emit the n-way indexed Case op.
    return cond_v2.indexed_case(
        branch_index,
        branch_fns,
        lower_using_switch_merge=lower_using_switch_merge)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@tf_export("__internal__.execute_fn_for_device", v1=[])
def execute_fn_for_device(device_branch_fns, default_fn, name="execute_fn"):
  """Executes one of the provided callables based on the device placement.

  This API is used when the implementations for high level function depend on
  the underlying device placement. It takes a dictionary of device type to
  callables. The device type includes "CPU", "GPU", "TPU", etc. When the type of
  the device where to run this op matches the key in 'device_branch_fns',
  the corresponding callable is executed, falling back to 'default_fn' if none
  matches.

  **Example:**
  ```python
  def f1(): return tf.constant(1)
  def f2(): return tf.constant(2)
  r = tf.execute_fn_for_device({"CPU": f1, "GPU": f2}, default_fn=f1)
  ```
  'r' is evaluated as 1 when it runs on CPU, 2 running on GPU, 1 running on
  any other device types.


  Args:
    device_branch_fns: a dictionary of device types to the callables. Each
      callable must return a matching structure of tensors.
    default_fn: fallback callable when the underlying device does not match any
      key in the 'device_branch_fns'.
    name: A name for this operation (optional).

  Returns:
    The tensors returned by the callable identified by device type during
    execution, or those returned by 'default_fn' if no key matches.
  """
  # Always execute the default fn for XLA to avoid complicated graph by case op.
  # see more discussions in b/167276293.
  if util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    return default_fn()

  # Normalize keys so device-type matching is case-insensitive.
  normalized_fns = {dev.upper(): fn for dev, fn in device_branch_fns.items()}
  device_index = gen_functional_ops.device_index(
      device_names=list(normalized_fns.keys()))
  return _indexed_case_helper(
      list(normalized_fns.values()),
      default_fn,
      device_index,
      name,
      lower_using_switch_merge=False)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
@tf_export("switch_case")
def switch_case(branch_index, branch_fns, default=None, name="switch_case"):
  """Create a switch/case operation, i.e.

  an integer-indexed conditional.

  See also `tf.case`.

  This op can be substantially more efficient than `tf.case` when exactly one
  branch will be selected. `tf.switch_case` is more like a C++ switch/case
  statement than `tf.case`, which is more like an if/elif/elif/else chain.

  The `branch_fns` parameter is either a dict from `int` to callables, or list
  of (`int`, callable) pairs, or simply a list of callables (in which case the
  index is implicitly the key). The `branch_index` `Tensor` is used to select an
  element in `branch_fns` with matching `int` key, falling back to `default`
  if none match, or `max(keys)` if no `default` is provided. The keys must form
  a contiguous set from `0` to `len(branch_fns) - 1`.

  `tf.switch_case` supports nested structures as implemented in `tf.nest`. All
  callables must return the same (possibly nested) value structure of lists,
  tuples, and/or named tuples.

  **Example:**

  Pseudocode:

  ```c++
  switch (branch_index) {  // c-style switch
    case 0: return 17;
    case 1: return 31;
    default: return -1;
  }
  ```
  or
  ```python
  branches = {0: lambda: 17, 1: lambda: 31}
  branches.get(branch_index, lambda: -1)()
  ```

  Expressions:

  ```python
  def f1(): return tf.constant(17)
  def f2(): return tf.constant(31)
  def f3(): return tf.constant(-1)
  r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3)
  # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3})
  ```

  Args:
    branch_index: An int Tensor specifying which of `branch_fns` should be
      executed.
    branch_fns: A `dict` mapping `int`s to callables, or a `list` of (`int`,
      callable) pairs, or simply a list of callables (in which case the index
      serves as the key). Each callable must return a matching structure of
      tensors.
    default: Optional callable that returns a structure of tensors.
    name: A name for this operation (optional).

  Returns:
    The tensors returned by the callable identified by `branch_index`, or those
    returned by `default` if no key matches and `default` was provided, or those
    returned by the max-keyed `branch_fn` if no `default` is provided.

  Raises:
    TypeError: If `branch_fns` is not a list/dictionary.
    TypeError: If `branch_fns` is a list but does not contain 2-tuples or
      callables.
    TypeError: If `fns[i]` is not callable for any i, or `default` is not
      callable.
  """
  # All validation and Case-op emission lives in the shared helper.
  return _indexed_case_helper(branch_fns, default, branch_index, name)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_func_graphs.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""FuncGraphs for V2 control flow."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.framework import func_graph
|
| 18 |
+
from tensorflow.python.framework import ops
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ControlFlowFuncGraph(func_graph.FuncGraph):
  """Contains control flow-specific FuncGraph logic."""

  def __init__(self, *args, **kwargs):
    super(ControlFlowFuncGraph, self).__init__(*args, **kwargs)
    outer_graph = self.outer_graph
    # Unlike tf.function, control flow FuncGraphs are generally created one per
    # op. This means hard-coding any outer device scopes in the body (rather
    # than inspecting the call-time placement of the control flow op) makes
    # sense.
    self._device_function_stack = outer_graph._device_function_stack.copy()  # pylint: disable=protected-access
    self.is_control_flow_graph = True
    if ops.executing_eagerly_outside_functions():
      # Inherit the outer graph's name scope so ops created inside the body
      # get names consistent with the surrounding code.
      func_graph.override_func_graph_name_scope(
          self, self.outer_graph.get_name_scope())
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class CondBranchFuncGraph(ControlFlowFuncGraph):
|
| 39 |
+
"""FuncGraph for branches of tf.cond().
|
| 40 |
+
|
| 41 |
+
This is used to distinguish cond branches from other functions.
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class WhileCondFuncGraph(ControlFlowFuncGraph):
|
| 46 |
+
"""FuncGraph for the condition of tf.while_loop().
|
| 47 |
+
|
| 48 |
+
This is used to distinguish while conditions from other functions.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class WhileBodyFuncGraph(ControlFlowFuncGraph):
|
| 53 |
+
"""FuncGraph for the body of tf.while_loop().
|
| 54 |
+
|
| 55 |
+
This is used to distinguish while bodies from other functions.
|
| 56 |
+
"""
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_toggles.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""API for enabling v2 control flow."""
|
| 17 |
+
|
| 18 |
+
from tensorflow.python.framework import ops
|
| 19 |
+
from tensorflow.python.ops import control_flow_util
|
| 20 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 21 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@tf_export(v1=["enable_control_flow_v2"])
|
| 25 |
+
def enable_control_flow_v2(): # pylint: disable=invalid-name
|
| 26 |
+
"""Use control flow v2.
|
| 27 |
+
|
| 28 |
+
control flow v2 (cfv2) is an improved version of control flow in TensorFlow
|
| 29 |
+
with support for higher order derivatives. Enabling cfv2 will change the
|
| 30 |
+
graph/function representation of control flow, e.g., `tf.while_loop` and
|
| 31 |
+
`tf.cond` will generate functional `While` and `If` ops instead of low-level
|
| 32 |
+
`Switch`, `Merge` etc. ops. Note: Importing and running graphs exported
|
| 33 |
+
with old control flow will still be supported.
|
| 34 |
+
|
| 35 |
+
Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0
|
| 36 |
+
feature.
|
| 37 |
+
|
| 38 |
+
Note: v2 control flow is always enabled inside of tf.function. Calling this
|
| 39 |
+
function is not required.
|
| 40 |
+
"""
|
| 41 |
+
# pylint: disable=protected-access
|
| 42 |
+
logging.vlog(1, "Enabling control flow v2")
|
| 43 |
+
ops._control_flow_api_gauge.get_cell().set(True)
|
| 44 |
+
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@tf_export(v1=["disable_control_flow_v2"])
|
| 48 |
+
def disable_control_flow_v2(): # pylint: disable=invalid-name
|
| 49 |
+
"""Opts out of control flow v2.
|
| 50 |
+
|
| 51 |
+
Note: v2 control flow is always enabled inside of tf.function. Calling this
|
| 52 |
+
function has no effect in that case.
|
| 53 |
+
|
| 54 |
+
If your code needs tf.disable_control_flow_v2() to be called to work
|
| 55 |
+
properly please file a bug.
|
| 56 |
+
"""
|
| 57 |
+
# pylint: disable=protected-access
|
| 58 |
+
logging.vlog(1, "Disabling control flow v2")
|
| 59 |
+
ops._control_flow_api_gauge.get_cell().set(False)
|
| 60 |
+
control_flow_util.ENABLE_CONTROL_FLOW_V2 = False
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@tf_export(v1=["control_flow_v2_enabled"])
|
| 64 |
+
def control_flow_v2_enabled(): # pylint: disable=invalid-name
|
| 65 |
+
"""Returns `True` if v2 control flow is enabled.
|
| 66 |
+
|
| 67 |
+
Note: v2 control flow is always enabled inside of tf.function.
|
| 68 |
+
"""
|
| 69 |
+
return control_flow_util.EnableControlFlowV2(ops.get_default_graph())
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/custom_gradient.py
ADDED
|
@@ -0,0 +1,823 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Decorator to overrides the gradient for a function."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.eager import backprop
|
| 18 |
+
from tensorflow.python.eager import context
|
| 19 |
+
from tensorflow.python.eager import record
|
| 20 |
+
from tensorflow.python.framework import composite_tensor_gradient
|
| 21 |
+
from tensorflow.python.framework import dtypes
|
| 22 |
+
from tensorflow.python.framework import ops
|
| 23 |
+
from tensorflow.python.ops import array_ops
|
| 24 |
+
from tensorflow.python.ops import gen_array_ops
|
| 25 |
+
from tensorflow.python.ops import handle_data_util
|
| 26 |
+
from tensorflow.python.ops import math_ops
|
| 27 |
+
from tensorflow.python.ops import op_selector
|
| 28 |
+
from tensorflow.python.ops import resource_variable_ops
|
| 29 |
+
from tensorflow.python.ops import variable_scope
|
| 30 |
+
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
|
| 31 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 32 |
+
from tensorflow.python.util import nest
|
| 33 |
+
from tensorflow.python.util import tf_decorator
|
| 34 |
+
from tensorflow.python.util import tf_inspect
|
| 35 |
+
from tensorflow.python.util import variable_utils
|
| 36 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
VAR_OP_TYPES = [
|
| 40 |
+
"VariableV2",
|
| 41 |
+
"VarHandleOp",
|
| 42 |
+
]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@tf_export("custom_gradient")
|
| 46 |
+
def custom_gradient(f=None):
|
| 47 |
+
"""Decorator to define a function with a custom gradient.
|
| 48 |
+
|
| 49 |
+
This decorator allows fine grained control over the gradients of a sequence
|
| 50 |
+
for operations. This may be useful for multiple reasons, including providing
|
| 51 |
+
a more efficient or numerically stable gradient for a sequence of operations.
|
| 52 |
+
|
| 53 |
+
For example, consider the following function that commonly occurs in the
|
| 54 |
+
computation of cross entropy and log likelihoods:
|
| 55 |
+
|
| 56 |
+
```python
|
| 57 |
+
def log1pexp(x):
|
| 58 |
+
return tf.math.log(1 + tf.exp(x))
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
Due to numerical instability, the gradient of this function evaluated at x=100
|
| 62 |
+
is NaN. For example:
|
| 63 |
+
|
| 64 |
+
```python
|
| 65 |
+
with tf.GradientTape() as tape:
|
| 66 |
+
tape.watch(x)
|
| 67 |
+
y=log1pexp(x)
|
| 68 |
+
dy_dx = tape.gradient(y, x) # Will be NaN when evaluated.
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
The gradient expression can be analytically simplified to provide numerical
|
| 72 |
+
stability:
|
| 73 |
+
|
| 74 |
+
```python
|
| 75 |
+
@tf.custom_gradient
|
| 76 |
+
def log1pexp(x):
|
| 77 |
+
e = tf.exp(x)
|
| 78 |
+
def grad(upstream):
|
| 79 |
+
return upstream * (1 - 1 / (1 + e))
|
| 80 |
+
return tf.math.log(1 + e), grad
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
With this definition, the gradient `dy_dx` at `x = 100` will be correctly
|
| 84 |
+
evaluated as 1.0.
|
| 85 |
+
|
| 86 |
+
The variable `upstream` is defined as the upstream gradient. i.e. the gradient
|
| 87 |
+
from all the layers or functions originating from this layer. The above
|
| 88 |
+
example has no upstream functions, therefore `upstream = dy/dy = 1.0`.
|
| 89 |
+
|
| 90 |
+
Assume that `x_i` is `log1pexp` in the forward pass `x_1 = x_1(x_0)`,
|
| 91 |
+
`x_2 = x_2(x_1)`, ..., `x_i = x_i(x_i-1)`, ..., `x_n = x_n(x_n-1)`. By
|
| 92 |
+
chain rule we know that `dx_n/dx_0 = dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... *
|
| 93 |
+
dx_i/dx_i-1 * ... * dx_1/dx_0`.
|
| 94 |
+
|
| 95 |
+
In this case the gradient of our current function defined as
|
| 96 |
+
`dx_i/dx_i-1 = (exp(x_i) / (1 + exp(x_i))) = (1 - 1 / (1 + exp(x_i)))`. The
|
| 97 |
+
upstream gradient `upstream` would be `dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... *
|
| 98 |
+
dx_i+1/dx_i`. The upstream gradient multiplied by the current gradient is
|
| 99 |
+
then passed downstream.
|
| 100 |
+
|
| 101 |
+
In case the function takes multiple variables as input, the `grad`
|
| 102 |
+
function must also return the same number of variables.
|
| 103 |
+
We take the function `z = x * y` as an example.
|
| 104 |
+
|
| 105 |
+
>>> @tf.custom_gradient
|
| 106 |
+
... def bar(x, y):
|
| 107 |
+
... def grad(upstream):
|
| 108 |
+
... dz_dx = y
|
| 109 |
+
... dz_dy = x
|
| 110 |
+
... return upstream * dz_dx, upstream * dz_dy
|
| 111 |
+
... z = x * y
|
| 112 |
+
... return z, grad
|
| 113 |
+
>>> x = tf.constant(2.0, dtype=tf.float32)
|
| 114 |
+
>>> y = tf.constant(3.0, dtype=tf.float32)
|
| 115 |
+
>>> with tf.GradientTape(persistent=True) as tape:
|
| 116 |
+
... tape.watch(x)
|
| 117 |
+
... tape.watch(y)
|
| 118 |
+
... z = bar(x, y)
|
| 119 |
+
>>> z
|
| 120 |
+
<tf.Tensor: shape=(), dtype=float32, numpy=6.0>
|
| 121 |
+
>>> tape.gradient(z, x)
|
| 122 |
+
<tf.Tensor: shape=(), dtype=float32, numpy=3.0>
|
| 123 |
+
>>> tape.gradient(z, y)
|
| 124 |
+
<tf.Tensor: shape=(), dtype=float32, numpy=2.0>
|
| 125 |
+
|
| 126 |
+
Nesting custom gradients can lead to unintuitive results. The default
|
| 127 |
+
behavior does not correspond to n-th order derivatives. For example
|
| 128 |
+
|
| 129 |
+
```python
|
| 130 |
+
@tf.custom_gradient
|
| 131 |
+
def op(x):
|
| 132 |
+
y = op1(x)
|
| 133 |
+
@tf.custom_gradient
|
| 134 |
+
def grad_fn(dy):
|
| 135 |
+
gdy = op2(x, y, dy)
|
| 136 |
+
def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x.
|
| 137 |
+
return op3(x, y, dy, ddy)
|
| 138 |
+
return gdy, grad_grad_fn
|
| 139 |
+
return y, grad_fn
|
| 140 |
+
```
|
| 141 |
+
|
| 142 |
+
The function `grad_grad_fn` will be calculating the first order gradient
|
| 143 |
+
of `grad_fn` with respect to `dy`, which is used to generate forward-mode
|
| 144 |
+
gradient graphs from backward-mode gradient graphs, but is not the same as
|
| 145 |
+
the second order gradient of `op` with respect to `x`.
|
| 146 |
+
|
| 147 |
+
Instead, wrap nested `@tf.custom_gradients` in another function:
|
| 148 |
+
|
| 149 |
+
```python
|
| 150 |
+
@tf.custom_gradient
|
| 151 |
+
def op_with_fused_backprop(x):
|
| 152 |
+
y, x_grad = fused_op(x)
|
| 153 |
+
def first_order_gradient(dy):
|
| 154 |
+
@tf.custom_gradient
|
| 155 |
+
def first_order_custom(unused_x):
|
| 156 |
+
def second_order_and_transpose(ddy):
|
| 157 |
+
return second_order_for_x(...), gradient_wrt_dy(...)
|
| 158 |
+
return x_grad, second_order_and_transpose
|
| 159 |
+
return dy * first_order_custom(x)
|
| 160 |
+
return y, first_order_gradient
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
Additional arguments to the inner `@tf.custom_gradient`-decorated function
|
| 164 |
+
control the expected return values of the innermost function.
|
| 165 |
+
|
| 166 |
+
The examples above illustrate how to specify custom gradients for functions
|
| 167 |
+
which do not read from variables. The following example uses variables, which
|
| 168 |
+
require special handling because they are effectively inputs of the forward
|
| 169 |
+
function.
|
| 170 |
+
|
| 171 |
+
>>> weights = tf.Variable(tf.ones([2])) # Trainable variable weights
|
| 172 |
+
>>> @tf.custom_gradient
|
| 173 |
+
... def linear_poly(x):
|
| 174 |
+
... # Creating polynomial
|
| 175 |
+
... poly = weights[1] * x + weights[0]
|
| 176 |
+
...
|
| 177 |
+
... def grad_fn(dpoly, variables):
|
| 178 |
+
... # dy/dx = weights[1] and we need to left multiply dpoly
|
| 179 |
+
... grad_xs = dpoly * weights[1] # Scalar gradient
|
| 180 |
+
...
|
| 181 |
+
... grad_vars = [] # To store gradients of passed variables
|
| 182 |
+
... assert variables is not None
|
| 183 |
+
... assert len(variables) == 1
|
| 184 |
+
... assert variables[0] is weights
|
| 185 |
+
... # Manually computing dy/dweights
|
| 186 |
+
... dy_dw = dpoly * tf.stack([x ** 1, x ** 0])
|
| 187 |
+
... grad_vars.append(
|
| 188 |
+
... tf.reduce_sum(tf.reshape(dy_dw, [2, -1]), axis=1)
|
| 189 |
+
... )
|
| 190 |
+
... return grad_xs, grad_vars
|
| 191 |
+
... return poly, grad_fn
|
| 192 |
+
>>> x = tf.constant([1., 2., 3.])
|
| 193 |
+
>>> with tf.GradientTape(persistent=True) as tape:
|
| 194 |
+
... tape.watch(x)
|
| 195 |
+
... poly = linear_poly(x)
|
| 196 |
+
>>> poly # poly = x + 1
|
| 197 |
+
<tf.Tensor: shape=(3,),
|
| 198 |
+
dtype=float32,
|
| 199 |
+
numpy=array([2., 3., 4.], dtype=float32)>
|
| 200 |
+
>>> tape.gradient(poly, x) # conventional scalar gradient dy/dx
|
| 201 |
+
<tf.Tensor: shape=(3,),
|
| 202 |
+
dtype=float32,
|
| 203 |
+
numpy=array([1., 1., 1.], dtype=float32)>
|
| 204 |
+
>>> tape.gradient(poly, weights)
|
| 205 |
+
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 3.], dtype=float32)>
|
| 206 |
+
|
| 207 |
+
Above example illustrates usage of trainable variable `weights`.
|
| 208 |
+
In the example, the inner `grad_fn` accepts an extra `variables` input
|
| 209 |
+
parameter and also returns an extra `grad_vars` output. That extra argument
|
| 210 |
+
is passed if the forward function reads any variables. You need to
|
| 211 |
+
compute the gradient w.r.t. each of those `variables` and output it as a list
|
| 212 |
+
of `grad_vars`. Note here that default value of `variables` is set to `None`
|
| 213 |
+
when no variables are used in the forward function.
|
| 214 |
+
|
| 215 |
+
It should be noted `tf.GradientTape` is still watching the forward pass of a
|
| 216 |
+
`tf.custom_gradient`, and will use the ops it watches. As a consequence,
|
| 217 |
+
calling `tf.function` while the tape is still watching leads
|
| 218 |
+
to a gradient graph being built. If an op is used in `tf.function` without
|
| 219 |
+
registered gradient, a `LookupError` will be raised.
|
| 220 |
+
|
| 221 |
+
Users can insert `tf.stop_gradient` to customize this behavior. This
|
| 222 |
+
is demonstrated in the example below. `tf.random.shuffle` does not have a
|
| 223 |
+
registered gradient. As a result `tf.stop_gradient` is used to avoid the
|
| 224 |
+
`LookupError`.
|
| 225 |
+
|
| 226 |
+
```python
|
| 227 |
+
x = tf.constant([0.3, 0.5], dtype=tf.float32)
|
| 228 |
+
|
| 229 |
+
@tf.custom_gradient
|
| 230 |
+
def test_func_with_stop_grad(x):
|
| 231 |
+
@tf.function
|
| 232 |
+
def _inner_func():
|
| 233 |
+
# Avoid exception during the forward pass
|
| 234 |
+
return tf.stop_gradient(tf.random.shuffle(x))
|
| 235 |
+
# return tf.random.shuffle(x) # This will raise
|
| 236 |
+
|
| 237 |
+
res = _inner_func()
|
| 238 |
+
def grad(upstream):
|
| 239 |
+
return upstream # Arbitrarily defined custom gradient
|
| 240 |
+
return res, grad
|
| 241 |
+
|
| 242 |
+
with tf.GradientTape() as g:
|
| 243 |
+
g.watch(x)
|
| 244 |
+
res = test_func_with_stop_grad(x)
|
| 245 |
+
|
| 246 |
+
g.gradient(res, x)
|
| 247 |
+
```
|
| 248 |
+
|
| 249 |
+
See also `tf.RegisterGradient` which registers a gradient function for a
|
| 250 |
+
primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
|
| 251 |
+
for fine grained control over the gradient computation of a sequence of
|
| 252 |
+
operations.
|
| 253 |
+
|
| 254 |
+
Note that if the decorated function uses `Variable`s, the enclosing variable
|
| 255 |
+
scope must be using
|
| 256 |
+
[ResourceVariables](https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables).
|
| 257 |
+
|
| 258 |
+
Args:
|
| 259 |
+
f: function `f(*x)` that returns a tuple `(y, grad_fn)` where: - `x` is a
|
| 260 |
+
sequence of (nested structures of) `Tensor` inputs to the function. - `y`
|
| 261 |
+
is a (nested structure of) `Tensor` outputs of applying TensorFlow
|
| 262 |
+
operations in `f` to `x`. - `grad_fn` is a function with the signature
|
| 263 |
+
`g(*grad_ys)` which returns a list of `Tensor`s the same size as
|
| 264 |
+
(flattened) `x` - the derivatives of `Tensor`s in `y` with respect to the
|
| 265 |
+
`Tensor`s in `x`. `grad_ys` is a sequence of `Tensor`s the same size as
|
| 266 |
+
(flattened) `y` holding the initial value gradients for each `Tensor` in
|
| 267 |
+
`y`. In a pure mathematical sense, a vector-argument vector-valued
|
| 268 |
+
function `f`'s derivatives should be its Jacobian matrix `J`. Here we are
|
| 269 |
+
expressing the Jacobian `J` as a function `grad_fn` which defines how `J`
|
| 270 |
+
will transform a vector `grad_ys` when left-multiplied with it (`grad_ys *
|
| 271 |
+
J`, the vector-Jacobian product, or VJP). This functional representation
|
| 272 |
+
of a matrix is convenient to use for chain-rule calculation (in e.g. the
|
| 273 |
+
back-propagation algorithm). If `f` uses `Variable`s (that are not part
|
| 274 |
+
of the inputs), i.e. through `get_variable`, then `grad_fn` should have
|
| 275 |
+
signature `g(*grad_ys, variables=None)`, where `variables` is a list of
|
| 276 |
+
the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where
|
| 277 |
+
`grad_xs` is the same as above, and `grad_vars` is a `list<Tensor>` with
|
| 278 |
+
the derivatives of `Tensor`s in `y` with respect to the variables (that
|
| 279 |
+
is, grad_vars has one Tensor per variable in variables).
|
| 280 |
+
|
| 281 |
+
Returns:
|
| 282 |
+
A function `h(x)` which returns the same value as `f(x)[0]` and whose
|
| 283 |
+
gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
|
| 284 |
+
"""
|
| 285 |
+
|
| 286 |
+
if f is None:
|
| 287 |
+
return lambda f: custom_gradient(f=f)
|
| 288 |
+
|
| 289 |
+
@Bind.decorator
|
| 290 |
+
def decorated(wrapped, args, kwargs):
|
| 291 |
+
"""Decorated function with custom gradient."""
|
| 292 |
+
if context.executing_eagerly():
|
| 293 |
+
return _eager_mode_decorator(wrapped, args, kwargs)
|
| 294 |
+
else:
|
| 295 |
+
return _graph_mode_decorator(wrapped, args, kwargs)
|
| 296 |
+
|
| 297 |
+
return tf_decorator.make_decorator(f, decorated(f)) # pylint: disable=no-value-for-parameter
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
class Bind:
|
| 301 |
+
"""When called evaluates `d(f, args, kwargs)` but supports binding `f`.
|
| 302 |
+
|
| 303 |
+
>>> @Bind.decorator
|
| 304 |
+
... def my_decorator(f, args, kwargs):
|
| 305 |
+
... print("my_decorator called with", args, kwargs)
|
| 306 |
+
... return f(*args, **kwargs)
|
| 307 |
+
|
| 308 |
+
>>> class Foo:
|
| 309 |
+
... @my_decorator
|
| 310 |
+
... def bar(self, a, b, c):
|
| 311 |
+
... return a * b * c
|
| 312 |
+
|
| 313 |
+
>>> Foo.bar(None, 1, 2, c=3)
|
| 314 |
+
my_decorator called with (None, 1, 2) {'c': 3}
|
| 315 |
+
6
|
| 316 |
+
|
| 317 |
+
>>> foo = Foo()
|
| 318 |
+
>>> foo.bar(1, 2, c=3)
|
| 319 |
+
my_decorator called with (1, 2) {'c': 3}
|
| 320 |
+
6
|
| 321 |
+
"""
|
| 322 |
+
|
| 323 |
+
@classmethod
|
| 324 |
+
def decorator(cls, d):
|
| 325 |
+
return lambda f: Bind(f, d)
|
| 326 |
+
|
| 327 |
+
def __init__(self, f, d):
|
| 328 |
+
self._f = f
|
| 329 |
+
self._d = d
|
| 330 |
+
|
| 331 |
+
def __get__(self, instance, owner):
|
| 332 |
+
if instance is not None:
|
| 333 |
+
f = self._f.__get__(instance, owner)
|
| 334 |
+
return tf_decorator.make_decorator(f, Bind(f, self._d))
|
| 335 |
+
else:
|
| 336 |
+
return self
|
| 337 |
+
|
| 338 |
+
def __call__(self, *a, **k):
|
| 339 |
+
return self._d(self._f, a, k)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def get_variable_by_name(var_name):
|
| 343 |
+
"""Given a variable name, retrieves a handle on the tensorflow Variable."""
|
| 344 |
+
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
|
| 345 |
+
|
| 346 |
+
def _filter_fn(item):
|
| 347 |
+
try:
|
| 348 |
+
return var_name == item.op.name
|
| 349 |
+
except AttributeError:
|
| 350 |
+
# Collection items without operation are ignored.
|
| 351 |
+
return False
|
| 352 |
+
|
| 353 |
+
candidate_vars = list(filter(_filter_fn, global_vars))
|
| 354 |
+
|
| 355 |
+
if len(candidate_vars) >= 1:
|
| 356 |
+
# Filter out non-trainable variables.
|
| 357 |
+
candidate_vars = [v for v in candidate_vars if v.trainable]
|
| 358 |
+
else:
|
| 359 |
+
raise ValueError("Unsuccessful at finding variable {}.".format(var_name))
|
| 360 |
+
|
| 361 |
+
if len(candidate_vars) == 1:
|
| 362 |
+
return candidate_vars[0]
|
| 363 |
+
elif len(candidate_vars) > 1:
|
| 364 |
+
raise ValueError(
|
| 365 |
+
"Unsuccessful at finding trainable variable {}. "
|
| 366 |
+
"Number of candidates: {}. "
|
| 367 |
+
"Candidates: {}".format(var_name, len(candidate_vars), candidate_vars))
|
| 368 |
+
else:
|
| 369 |
+
# The variable is not trainable.
|
| 370 |
+
return None
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def _get_dependent_variables(input_ops, output_ops):
|
| 374 |
+
"""Finds variables involved in the subgraph between input_ops and output_ops.
|
| 375 |
+
|
| 376 |
+
Args:
|
| 377 |
+
input_ops: Flattened list of input ops
|
| 378 |
+
output_ops: Flattened list of output ops
|
| 379 |
+
|
| 380 |
+
Returns:
|
| 381 |
+
A list of variables
|
| 382 |
+
"""
|
| 383 |
+
|
| 384 |
+
# avoids the edge-case when input_ops == output_ops.
|
| 385 |
+
output_ops = nest.map_structure(gen_array_ops.identity, output_ops)
|
| 386 |
+
inbetween_ops = op_selector.get_backward_walk_ops(
|
| 387 |
+
seed_ops=output_ops,
|
| 388 |
+
stop_at_ts=input_ops,
|
| 389 |
+
inclusive=False,
|
| 390 |
+
only_differentiable=True)
|
| 391 |
+
var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)
|
| 392 |
+
var_names = (op.name for op in var_ops)
|
| 393 |
+
tf_vars = (get_variable_by_name(var_name) for var_name in var_names)
|
| 394 |
+
tf_vars = [v for v in tf_vars if v is not None]
|
| 395 |
+
return tf_vars
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def generate_name():
|
| 399 |
+
return "CustomGradient-%s" % ops.uid()
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
def _graph_mode_decorator(f, args, kwargs):
|
| 403 |
+
"""Implement custom gradient decorator for graph mode."""
|
| 404 |
+
# TODO(rsepassi): Add support for kwargs
|
| 405 |
+
if kwargs:
|
| 406 |
+
raise ValueError(
|
| 407 |
+
"The custom_gradient decorator currently supports keywords "
|
| 408 |
+
"arguments only when eager execution is enabled.")
|
| 409 |
+
name = generate_name()
|
| 410 |
+
args = variable_utils.convert_variables_to_tensors(args)
|
| 411 |
+
args = nest.map_structure(ops.convert_to_tensor, args, expand_composites=True)
|
| 412 |
+
|
| 413 |
+
# Checking global and local variables attempts to ensure that no non-resource
|
| 414 |
+
# Variables are added to the graph.
|
| 415 |
+
current_var_scope = variable_scope.get_variable_scope()
|
| 416 |
+
before_vars = set([
|
| 417 |
+
v.ref() for v in current_var_scope.global_variables() +
|
| 418 |
+
current_var_scope.local_variables()
|
| 419 |
+
])
|
| 420 |
+
with record.VariableWatcher() as variable_watcher:
|
| 421 |
+
result, grad_fn = f(*args)
|
| 422 |
+
|
| 423 |
+
flat_args = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 424 |
+
nest.flatten(args))
|
| 425 |
+
flat_result = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 426 |
+
nest.flatten(result))
|
| 427 |
+
flat_result_len = len(flat_result)
|
| 428 |
+
|
| 429 |
+
after_vars = set([
|
| 430 |
+
v.ref() for v in current_var_scope.global_variables() +
|
| 431 |
+
current_var_scope.local_variables()
|
| 432 |
+
])
|
| 433 |
+
new_vars = after_vars - before_vars
|
| 434 |
+
new_vars_list = [v.deref() for v in new_vars]
|
| 435 |
+
for v in new_vars_list:
|
| 436 |
+
if not resource_variable_ops.is_resource_variable(v):
|
| 437 |
+
raise TypeError(
|
| 438 |
+
"All variables used by a function wrapped with @custom_gradient must "
|
| 439 |
+
"be `ResourceVariable`s. Ensure that no `variable_scope` is created "
|
| 440 |
+
"with `use_resource=False`.")
|
| 441 |
+
|
| 442 |
+
# The variables that grad_fn needs to return gradients for are the set of
|
| 443 |
+
# variables used that are *not* part of the inputs.
|
| 444 |
+
variables_in_tape = frozenset([
|
| 445 |
+
v.ref() for v in variable_watcher.watched_variables()
|
| 446 |
+
])
|
| 447 |
+
|
| 448 |
+
graphs = {getattr(o, "graph", None) for o in flat_result}
|
| 449 |
+
# Not all results may be tensors. However, we want to ensure all tensor
|
| 450 |
+
# outputs are from the same graph and get a list of captured inputs for
|
| 451 |
+
# variable search
|
| 452 |
+
graphs.discard(None) # Discard non-graph outputs
|
| 453 |
+
if graphs:
|
| 454 |
+
if len(graphs) > 1:
|
| 455 |
+
raise ValueError(
|
| 456 |
+
"All custom_gradient outputs should be from the same graph")
|
| 457 |
+
output_graph = graphs.pop()
|
| 458 |
+
filtered_input_tensors = []
|
| 459 |
+
for i in flat_args:
|
| 460 |
+
if i.graph == output_graph:
|
| 461 |
+
filtered_input_tensors.append(i)
|
| 462 |
+
else:
|
| 463 |
+
filtered_input_tensors = flat_args
|
| 464 |
+
|
| 465 |
+
variables_in_subgraph = frozenset([
|
| 466 |
+
v.ref() for v in _get_dependent_variables(
|
| 467 |
+
input_ops=filtered_input_tensors, output_ops=flat_result)
|
| 468 |
+
])
|
| 469 |
+
variables = sorted(
|
| 470 |
+
[v.deref() for v in variables_in_subgraph.union(variables_in_tape)],
|
| 471 |
+
key=lambda v: v.name)
|
| 472 |
+
|
| 473 |
+
grad_argspec = tf_inspect.getfullargspec(grad_fn)
|
| 474 |
+
variables_in_signature = ("variables" in grad_argspec.args or
|
| 475 |
+
"variables" in grad_argspec.kwonlyargs or
|
| 476 |
+
grad_argspec.varkw)
|
| 477 |
+
if variables and not variables_in_signature:
|
| 478 |
+
raise TypeError(
|
| 479 |
+
"@tf.custom_gradient grad_fn must accept keyword argument 'variables', "
|
| 480 |
+
"since function uses variables: {}".format(variables))
|
| 481 |
+
if variables_in_signature and not variables:
|
| 482 |
+
# User seems to intend to use variables but none were captured.
|
| 483 |
+
logging.vlog(
|
| 484 |
+
1, "@custom_gradient grad_fn has 'variables' in signature, "
|
| 485 |
+
"but no ResourceVariables were used on the forward pass.")
|
| 486 |
+
|
| 487 |
+
all_tensors = flat_result + flat_args + variables
|
| 488 |
+
|
| 489 |
+
def tape_grad_fn(*result_grad_components):
|
| 490 |
+
"""Custom grad fn wrapper."""
|
| 491 |
+
result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(
|
| 492 |
+
nest.flatten(result), result_grad_components[:flat_result_len])
|
| 493 |
+
if not isinstance(result_grads, (list, tuple)):
|
| 494 |
+
result_grads = [result_grads]
|
| 495 |
+
|
| 496 |
+
if variables:
|
| 497 |
+
input_grads, variable_grads = grad_fn(*result_grads, variables=variables)
|
| 498 |
+
if len(variable_grads) != len(variables):
|
| 499 |
+
raise ValueError("Must return gradient for each variable from "
|
| 500 |
+
"@custom_gradient grad_fn.")
|
| 501 |
+
else:
|
| 502 |
+
input_grads = grad_fn(*result_grads)
|
| 503 |
+
variable_grads = []
|
| 504 |
+
|
| 505 |
+
# Need to return one value per input to the IdentityN, so pad the
|
| 506 |
+
# gradients of the inputs of the custom_gradient function with the
|
| 507 |
+
# gradients of the outputs as well.
|
| 508 |
+
input_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 509 |
+
nest.flatten(input_grads))
|
| 510 |
+
return ([None] * flat_result_len) + input_grads + variable_grads
|
| 511 |
+
|
| 512 |
+
@ops.RegisterGradient(name)
|
| 513 |
+
def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable
|
| 514 |
+
"""Custom grad fn wrapper."""
|
| 515 |
+
return tape_grad_fn(*result_grads)
|
| 516 |
+
|
| 517 |
+
original_tensors = all_tensors
|
| 518 |
+
with ops.get_default_graph().gradient_override_map({"IdentityN": name}):
|
| 519 |
+
all_tensors = array_ops.identity_n(all_tensors)
|
| 520 |
+
|
| 521 |
+
original_tensors = [ops.convert_to_tensor(x) for x in original_tensors]
|
| 522 |
+
|
| 523 |
+
# Propagate handle data for happier shape inference for resource variables.
|
| 524 |
+
for i, t in enumerate(original_tensors):
|
| 525 |
+
if t.dtype == dtypes.resource and hasattr(t, "_handle_data"):
|
| 526 |
+
all_tensors[i]._handle_data = t._handle_data # pylint: disable=protected-access
|
| 527 |
+
record.record_operation(
|
| 528 |
+
f.__name__, all_tensors, original_tensors, tape_grad_fn)
|
| 529 |
+
for ot, t in zip(original_tensors, all_tensors):
|
| 530 |
+
handle_data_util.copy_handle_data(ot, t)
|
| 531 |
+
flat_result = composite_tensor_gradient.replace_flat_tensors_for_gradients(
|
| 532 |
+
nest.flatten(result), all_tensors[:flat_result_len])
|
| 533 |
+
return nest.pack_sequence_as(result, flat_result)
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def _eager_mode_decorator(f, args, kwargs):
|
| 537 |
+
"""Implement custom gradient decorator for eager mode."""
|
| 538 |
+
with record.VariableWatcher() as variable_watcher:
|
| 539 |
+
result, grad_fn = f(*args, **kwargs)
|
| 540 |
+
flat_args = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 541 |
+
nest.flatten(args))
|
| 542 |
+
flat_kwargs = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 543 |
+
nest.flatten(kwargs))
|
| 544 |
+
all_inputs = flat_args + flat_kwargs
|
| 545 |
+
# The variables that grad_fn needs to return gradients for are the set of
|
| 546 |
+
# variables used that are *not* part of the inputs.
|
| 547 |
+
variables = [
|
| 548 |
+
v.deref() # pylint: disable=g-complex-comprehension
|
| 549 |
+
for v in set(v.ref() for v in variable_watcher.watched_variables())
|
| 550 |
+
if all(v.deref() is not i for i in all_inputs)
|
| 551 |
+
]
|
| 552 |
+
grad_argspec = tf_inspect.getfullargspec(grad_fn)
|
| 553 |
+
if (variables and ("variables" not in grad_argspec.args) and
|
| 554 |
+
("variables" not in grad_argspec.kwonlyargs) and
|
| 555 |
+
not grad_argspec.varkw):
|
| 556 |
+
raise TypeError(
|
| 557 |
+
"@tf.custom_gradient grad_fn must accept keyword argument 'variables', "
|
| 558 |
+
"since function uses variables: {}".format(variables))
|
| 559 |
+
flat_result = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 560 |
+
nest.flatten(result))
|
| 561 |
+
# TODO(apassos) consider removing the identity below.
|
| 562 |
+
flat_result = [gen_array_ops.identity(x) for x in flat_result]
|
| 563 |
+
|
| 564 |
+
input_tensors = [
|
| 565 |
+
ops.convert_to_tensor(x) for x in flat_args + list(variables)]
|
| 566 |
+
|
| 567 |
+
recorded_inputs = input_tensors
|
| 568 |
+
arg_count = len(flat_args)
|
| 569 |
+
|
| 570 |
+
def actual_grad_fn(*result_grad_components):
|
| 571 |
+
"""Custom grad fn wrapper."""
|
| 572 |
+
result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(
|
| 573 |
+
nest.flatten(result), result_grad_components)
|
| 574 |
+
if not isinstance(result_grads, (list, tuple)):
|
| 575 |
+
result_grads = [result_grads]
|
| 576 |
+
|
| 577 |
+
if variables:
|
| 578 |
+
input_grads, variable_grads = grad_fn(*result_grads, variables=variables)
|
| 579 |
+
if len(variable_grads) != len(variables):
|
| 580 |
+
raise ValueError("Must return gradient for each variable from "
|
| 581 |
+
"@custom_gradient grad_fn.")
|
| 582 |
+
else:
|
| 583 |
+
input_grads = grad_fn(*result_grads)
|
| 584 |
+
variable_grads = []
|
| 585 |
+
flat_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(
|
| 586 |
+
nest.flatten(input_grads))
|
| 587 |
+
if len(flat_grads) != arg_count:
|
| 588 |
+
raise ValueError(
|
| 589 |
+
f"custom_gradient function expected to return {arg_count} "
|
| 590 |
+
f"gradients, but returned {len(flat_grads)} instead.")
|
| 591 |
+
return flat_grads + variable_grads
|
| 592 |
+
|
| 593 |
+
record.record_operation(f.__name__, flat_result, recorded_inputs,
|
| 594 |
+
actual_grad_fn)
|
| 595 |
+
flat_result = composite_tensor_gradient.replace_flat_tensors_for_gradients(
|
| 596 |
+
nest.flatten(result), flat_result)
|
| 597 |
+
return nest.pack_sequence_as(result, flat_result)
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
@tf_export("recompute_grad")
|
| 601 |
+
def recompute_grad(f):
|
| 602 |
+
"""Defines a function as a recompute-checkpoint for the tape auto-diff.
|
| 603 |
+
|
| 604 |
+
Tape checkpointing is a technique to reduce the memory consumption of the
|
| 605 |
+
auto-diff tape:
|
| 606 |
+
|
| 607 |
+
- Without tape checkpointing operations and intermediate values are
|
| 608 |
+
recorded to the tape for use in the backward pass.
|
| 609 |
+
|
| 610 |
+
- With tape checkpointing, only the function call and its inputs are
|
| 611 |
+
recorded. During back-propagation the `recompute_grad` custom gradient
|
| 612 |
+
(`tf.custom_gradient`) recomputes the function under a localized Tape object.
|
| 613 |
+
This recomputation of the function during backpropagation performs redundant
|
| 614 |
+
calculation, but reduces the overall memory usage of the Tape.
|
| 615 |
+
|
| 616 |
+
>>> y = tf.Variable(1.0)
|
| 617 |
+
|
| 618 |
+
>>> def my_function(x):
|
| 619 |
+
... tf.print('running')
|
| 620 |
+
... z = x*y
|
| 621 |
+
... return z
|
| 622 |
+
|
| 623 |
+
>>> my_function_recompute = tf.recompute_grad(my_function)
|
| 624 |
+
|
| 625 |
+
>>> with tf.GradientTape() as tape:
|
| 626 |
+
... r = tf.constant(1.0)
|
| 627 |
+
... for i in range(4):
|
| 628 |
+
... r = my_function_recompute(r)
|
| 629 |
+
running
|
| 630 |
+
running
|
| 631 |
+
running
|
| 632 |
+
running
|
| 633 |
+
|
| 634 |
+
>>> grad = tape.gradient(r, [y])
|
| 635 |
+
running
|
| 636 |
+
running
|
| 637 |
+
running
|
| 638 |
+
running
|
| 639 |
+
|
| 640 |
+
Without `recompute_grad`, the tape contains all intermitate steps, and no
|
| 641 |
+
recomputation is performed.
|
| 642 |
+
|
| 643 |
+
>>> with tf.GradientTape() as tape:
|
| 644 |
+
... r = tf.constant(1.0)
|
| 645 |
+
... for i in range(4):
|
| 646 |
+
... r = my_function(r)
|
| 647 |
+
running
|
| 648 |
+
running
|
| 649 |
+
running
|
| 650 |
+
running
|
| 651 |
+
|
| 652 |
+
>>> grad = tape.gradient(r, [y])
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
If `f` was a `tf.keras` `Model` or `Layer` object, methods and attributes
|
| 656 |
+
such as `f.variables` are not available on the returned function `g`.
|
| 657 |
+
Either keep a reference of `f` , or use `g.__wrapped__` for accessing
|
| 658 |
+
these variables and methods.
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
>>> def print_running_and_return(x):
|
| 662 |
+
... tf.print("running")
|
| 663 |
+
... return x
|
| 664 |
+
|
| 665 |
+
>>> model = tf.keras.Sequential([
|
| 666 |
+
... tf.keras.layers.Lambda(print_running_and_return),
|
| 667 |
+
... tf.keras.layers.Dense(2)
|
| 668 |
+
... ])
|
| 669 |
+
|
| 670 |
+
>>> model_recompute = tf.recompute_grad(model)
|
| 671 |
+
|
| 672 |
+
>>> with tf.GradientTape(persistent=True) as tape:
|
| 673 |
+
... r = tf.constant([[1,2]])
|
| 674 |
+
... for i in range(4):
|
| 675 |
+
... r = model_recompute(r)
|
| 676 |
+
running
|
| 677 |
+
running
|
| 678 |
+
running
|
| 679 |
+
running
|
| 680 |
+
|
| 681 |
+
>>> grad = tape.gradient(r, model.variables)
|
| 682 |
+
running
|
| 683 |
+
running
|
| 684 |
+
running
|
| 685 |
+
running
|
| 686 |
+
|
| 687 |
+
Alternatively, use the `__wrapped__` attribute to access the original
|
| 688 |
+
model object.
|
| 689 |
+
|
| 690 |
+
>>> grad = tape.gradient(r, model_recompute.__wrapped__.variables)
|
| 691 |
+
running
|
| 692 |
+
running
|
| 693 |
+
running
|
| 694 |
+
running
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
Args:
|
| 698 |
+
f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
|
| 699 |
+
|
| 700 |
+
Returns:
|
| 701 |
+
A function `g` wrapping `f` that defines a custom gradient, which recomputes
|
| 702 |
+
`f` on the backwards pass of a gradient call.
|
| 703 |
+
"""
|
| 704 |
+
# TODO(cdfreeman) Add is_recomputing functionality from graph mode version
|
| 705 |
+
|
| 706 |
+
@custom_gradient
|
| 707 |
+
def inner(*args, **kwargs):
|
| 708 |
+
"""Inner function closure for calculating gradients."""
|
| 709 |
+
current_var_scope = variable_scope.get_variable_scope()
|
| 710 |
+
with record.stop_recording():
|
| 711 |
+
result = f(*args, **kwargs)
|
| 712 |
+
|
| 713 |
+
def grad_wrapper(*wrapper_args, variables=None):
|
| 714 |
+
"""Wrapper function to accomodate lack of kwargs in graph mode custom_gradient."""
|
| 715 |
+
|
| 716 |
+
@custom_gradient
|
| 717 |
+
def inner_recompute_grad(*dresult):
|
| 718 |
+
"""Nested custom gradient function for computing grads in reverse and forward mode autodiff."""
|
| 719 |
+
# Gradient calculation for reverse mode autodiff.
|
| 720 |
+
with backprop.GradientTape() as t:
|
| 721 |
+
id_args = nest.map_structure(gen_array_ops.identity, args)
|
| 722 |
+
# Tuple `dresult` should contain at least one tensor.
|
| 723 |
+
assert len(dresult) >= 1
|
| 724 |
+
|
| 725 |
+
if not context.executing_eagerly():
|
| 726 |
+
# XLA doesn't respect `tf.control_dependencies`. The code block
|
| 727 |
+
# below manually adds a data dependency to `dresult` to ensure
|
| 728 |
+
# recomputation of `f(*args, **kwargs)` happens after `dresult`.
|
| 729 |
+
|
| 730 |
+
# This works even if `dresult[0]` is a size 0 tensor as reduce_max
|
| 731 |
+
# of a size 0 tensor returns -inf. Use reshape here to avoid reading
|
| 732 |
+
# the entire `dresult[0]`.
|
| 733 |
+
elem = math_ops.reduce_max(array_ops.reshape(dresult[0], [-1])[:1])
|
| 734 |
+
# Cast elem to bool in case elem is NaN.
|
| 735 |
+
elem_bool = math_ops.cast(elem, dtypes.bool)
|
| 736 |
+
dresult_dep = array_ops.where_v2(
|
| 737 |
+
elem_bool == elem_bool, 0., float("nan")) # pylint: disable=comparison-with-itself
|
| 738 |
+
id_args = nest.map_structure(
|
| 739 |
+
lambda x: x + math_ops.cast(dresult_dep, x.dtype), id_args)
|
| 740 |
+
|
| 741 |
+
t.watch(id_args)
|
| 742 |
+
if variables is not None:
|
| 743 |
+
t.watch(variables)
|
| 744 |
+
with variable_scope.variable_scope(current_var_scope):
|
| 745 |
+
recomputed_result = f(*id_args, **kwargs)
|
| 746 |
+
kw_vars = []
|
| 747 |
+
if variables is not None:
|
| 748 |
+
kw_vars = list(variables)
|
| 749 |
+
grads = t.gradient(
|
| 750 |
+
recomputed_result,
|
| 751 |
+
list(id_args) + kw_vars,
|
| 752 |
+
output_gradients=dresult,
|
| 753 |
+
unconnected_gradients=UnconnectedGradients.ZERO)
|
| 754 |
+
|
| 755 |
+
def transpose(*t_args, **t_kwargs):
|
| 756 |
+
"""Gradient function calculation for forward mode autodiff."""
|
| 757 |
+
# Just throw an error since gradients / activations are not stored on
|
| 758 |
+
# tape for recompute.
|
| 759 |
+
raise NotImplementedError(
|
| 760 |
+
"recompute_grad tried to transpose grad of {}. "
|
| 761 |
+
"Consider not using recompute_grad in forward mode"
|
| 762 |
+
"autodiff".format(f.__name__))
|
| 763 |
+
|
| 764 |
+
return (grads[:len(id_args)], grads[len(id_args):]), transpose
|
| 765 |
+
|
| 766 |
+
return inner_recompute_grad(*wrapper_args)
|
| 767 |
+
|
| 768 |
+
return result, grad_wrapper
|
| 769 |
+
|
| 770 |
+
return tf_decorator.make_decorator(f, inner)
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@tf_export("grad_pass_through")
|
| 774 |
+
def grad_pass_through(f):
|
| 775 |
+
"""Creates a grad-pass-through op with the forward behavior provided in f.
|
| 776 |
+
|
| 777 |
+
Use this function to wrap any op, maintaining its behavior in the forward
|
| 778 |
+
pass, but replacing the original op in the backward graph with an identity.
|
| 779 |
+
For example:
|
| 780 |
+
|
| 781 |
+
```python
|
| 782 |
+
x = tf.Variable(1.0, name="x")
|
| 783 |
+
z = tf.Variable(3.0, name="z")
|
| 784 |
+
|
| 785 |
+
with tf.GradientTape() as tape:
|
| 786 |
+
# y will evaluate to 9.0
|
| 787 |
+
y = tf.grad_pass_through(x.assign)(z**2)
|
| 788 |
+
# grads will evaluate to 6.0
|
| 789 |
+
grads = tape.gradient(y, z)
|
| 790 |
+
```
|
| 791 |
+
|
| 792 |
+
Another example is a 'differentiable' moving average approximation, where
|
| 793 |
+
gradients are allowed to flow into the last value fed to the moving average,
|
| 794 |
+
but the moving average is still used for the forward pass:
|
| 795 |
+
|
| 796 |
+
```python
|
| 797 |
+
x = ... # Some scalar value
|
| 798 |
+
# A moving average object, we don't need to know how this is implemented
|
| 799 |
+
moving_average = MovingAverage()
|
| 800 |
+
with backprop.GradientTape() as tape:
|
| 801 |
+
# mavg_x will evaluate to the current running average value
|
| 802 |
+
mavg_x = tf.grad_pass_through(moving_average)(x)
|
| 803 |
+
grads = tape.gradient(mavg_x, x) # grads will evaluate to 1.0
|
| 804 |
+
```
|
| 805 |
+
|
| 806 |
+
Args:
|
| 807 |
+
f: function `f(*x)` that returns a `Tensor` or nested structure of `Tensor`
|
| 808 |
+
outputs.
|
| 809 |
+
|
| 810 |
+
Returns:
|
| 811 |
+
A function `h(x)` which returns the same values as `f(x)` and whose
|
| 812 |
+
gradients are the same as those of an identity function.
|
| 813 |
+
"""
|
| 814 |
+
@custom_gradient
|
| 815 |
+
def _grad_pass_through_op(*args, **kwargs):
|
| 816 |
+
def grad(*args, **kwargs):
|
| 817 |
+
variables = kwargs.get("variables")
|
| 818 |
+
if variables is not None:
|
| 819 |
+
# Variables involved in the wrapped op will not receive gradients.
|
| 820 |
+
return args, [None] * len(variables)
|
| 821 |
+
return args
|
| 822 |
+
return f(*args, **kwargs), grad
|
| 823 |
+
return tf_decorator.make_decorator(f, _grad_pass_through_op)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/default_gradient.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities for computing default gradients."""
|
| 16 |
+
from tensorflow.python.framework import dtypes
|
| 17 |
+
from tensorflow.python.framework import tensor_shape
|
| 18 |
+
from tensorflow.python.ops import array_ops
|
| 19 |
+
from tensorflow.python.ops import resource_variable_ops
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_zeros_dtype(t):
|
| 23 |
+
"""Return the dtype for the default gradient for a Tensor."""
|
| 24 |
+
if t.dtype == dtypes.resource:
|
| 25 |
+
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
|
| 26 |
+
if (handle_data is None or not handle_data.is_set or
|
| 27 |
+
len(handle_data.shape_and_type) != 1):
|
| 28 |
+
raise ValueError("Internal error: Tried to take gradients (or similar) "
|
| 29 |
+
"of a variable without handle data:\n%s" % str(t))
|
| 30 |
+
return handle_data.shape_and_type[0].dtype
|
| 31 |
+
return t.dtype
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def shape_and_dtype(t):
|
| 35 |
+
"""Return the shape and dtype for the default gradient for a Tensor."""
|
| 36 |
+
if t.dtype == dtypes.resource:
|
| 37 |
+
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
|
| 38 |
+
if (handle_data is None or not handle_data.is_set or
|
| 39 |
+
len(handle_data.shape_and_type) != 1):
|
| 40 |
+
raise ValueError("Internal error: Tried to take gradients (or similar) "
|
| 41 |
+
"of a variable without handle data:\n%s" % str(t))
|
| 42 |
+
shape_and_type = handle_data.shape_and_type[0]
|
| 43 |
+
return (tensor_shape.TensorShape(shape_and_type.shape),
|
| 44 |
+
dtypes.as_dtype(shape_and_type.dtype))
|
| 45 |
+
return t.shape, t.dtype
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def zeros_like(t):
|
| 49 |
+
"""Like array_ops.zeros_like, but respects resource handles."""
|
| 50 |
+
if t.dtype == dtypes.resource:
|
| 51 |
+
return array_ops.zeros(*shape_and_dtype(t))
|
| 52 |
+
else:
|
| 53 |
+
return array_ops.zeros_like(t)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def ones_like(t):
|
| 57 |
+
"""Like array_ops.ones_like, but respects resource handles."""
|
| 58 |
+
if t.dtype == dtypes.resource:
|
| 59 |
+
return array_ops.ones(*shape_and_dtype(t))
|
| 60 |
+
else:
|
| 61 |
+
return array_ops.ones_like(t)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def supports_default_grad(t):
|
| 65 |
+
"""Whether tensor `t` supports creating a default gradient.
|
| 66 |
+
|
| 67 |
+
This function assumes that `t` is of a trainable type.
|
| 68 |
+
|
| 69 |
+
Args:
|
| 70 |
+
t: Tensor
|
| 71 |
+
|
| 72 |
+
Returns:
|
| 73 |
+
Bool
|
| 74 |
+
"""
|
| 75 |
+
if t.dtype == dtypes.resource:
|
| 76 |
+
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
|
| 77 |
+
if (handle_data is None or not handle_data.is_set or
|
| 78 |
+
len(handle_data.shape_and_type) != 1):
|
| 79 |
+
return False
|
| 80 |
+
return True
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/filesystem_ops.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Filesystem related operations."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.ops import gen_filesystem_ops as _gen_filesystem_ops
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# pylint: disable=protected-access
|
| 21 |
+
def filesystem_set_configuration(scheme, key, value, name=None):
|
| 22 |
+
"""Set configuration of the file system.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
scheme: File system scheme.
|
| 26 |
+
key: The name of the configuration option.
|
| 27 |
+
value: The value of the configuration option.
|
| 28 |
+
name: A name for the operation (optional).
|
| 29 |
+
|
| 30 |
+
Returns:
|
| 31 |
+
None.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
return _gen_filesystem_ops.file_system_set_configuration(
|
| 35 |
+
scheme, key=key, value=value, name=name)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# pylint: enable=protected-access
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_batch_ops.py
ADDED
|
@@ -0,0 +1,699 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Python wrappers around TensorFlow ops.
|
| 2 |
+
|
| 3 |
+
This file is MACHINE GENERATED! Do not edit.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
|
| 8 |
+
from tensorflow.python import pywrap_tfe as pywrap_tfe
|
| 9 |
+
from tensorflow.python.eager import context as _context
|
| 10 |
+
from tensorflow.python.eager import core as _core
|
| 11 |
+
from tensorflow.python.eager import execute as _execute
|
| 12 |
+
from tensorflow.python.framework import dtypes as _dtypes
|
| 13 |
+
from tensorflow.security.fuzzing.py import annotation_types as _atypes
|
| 14 |
+
|
| 15 |
+
from tensorflow.python.framework import op_def_registry as _op_def_registry
|
| 16 |
+
from tensorflow.python.framework import ops as _ops
|
| 17 |
+
from tensorflow.python.framework import op_def_library as _op_def_library
|
| 18 |
+
from tensorflow.python.util.deprecation import deprecated_endpoints
|
| 19 |
+
from tensorflow.python.util import dispatch as _dispatch
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
from typing import TypeVar, List, Any
|
| 23 |
+
from typing_extensions import Annotated
|
| 24 |
+
_BatchOutput = collections.namedtuple(
|
| 25 |
+
"Batch",
|
| 26 |
+
["batched_tensors", "batch_index", "id"])
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def batch(in_tensors, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, grad_timeout_micros: int, max_enqueued_batches:int=10, allowed_batch_sizes=[], container:str="", shared_name:str="", batching_queue:str="", name=None):
|
| 30 |
+
r"""Batches all input tensors nondeterministically.
|
| 31 |
+
|
| 32 |
+
When many instances of this Op are being run concurrently with the same
|
| 33 |
+
container/shared_name in the same device, some will output zero-shaped Tensors
|
| 34 |
+
and others will output Tensors of size up to max_batch_size.
|
| 35 |
+
|
| 36 |
+
All Tensors in in_tensors are batched together (so, for example, labels and
|
| 37 |
+
features should be batched with a single instance of this operation.
|
| 38 |
+
|
| 39 |
+
Each invocation of batch emits an `id` scalar which will be used to identify
|
| 40 |
+
this particular invocation when doing unbatch or its gradient.
|
| 41 |
+
|
| 42 |
+
Each op which emits a non-empty batch will also emit a non-empty batch_index
|
| 43 |
+
Tensor, which, is a [K, 3] matrix where each row contains the invocation's id,
|
| 44 |
+
start, and length of elements of each set of Tensors present in batched_tensors.
|
| 45 |
+
|
| 46 |
+
Batched tensors are concatenated along the first dimension, and all tensors in
|
| 47 |
+
in_tensors must have the first dimension of the same size.
|
| 48 |
+
|
| 49 |
+
in_tensors: The tensors to be batched.
|
| 50 |
+
num_batch_threads: Number of scheduling threads for processing batches of work.
|
| 51 |
+
Determines the number of batches processed in parallel.
|
| 52 |
+
max_batch_size: Batch sizes will never be bigger than this.
|
| 53 |
+
batch_timeout_micros: Maximum number of microseconds to wait before outputting
|
| 54 |
+
an incomplete batch.
|
| 55 |
+
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
|
| 56 |
+
nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
|
| 57 |
+
batches up to one of those sizes. The entries must increase monotonically, and
|
| 58 |
+
the final entry must equal max_batch_size.
|
| 59 |
+
grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
|
| 60 |
+
batched_tensors: Either empty tensors or a batch of concatenated Tensors.
|
| 61 |
+
batch_index: If out_tensors is non-empty, has information to invert it.
|
| 62 |
+
container: Controls the scope of sharing of this batch.
|
| 63 |
+
id: always contains a scalar with a unique ID for this invocation of Batch.
|
| 64 |
+
shared_name: Concurrently running instances of batch in the same device with the
|
| 65 |
+
same container and shared_name will batch their elements together. If left
|
| 66 |
+
empty, the op name will be used as the shared name.
|
| 67 |
+
T: the types of tensors to be batched.
|
| 68 |
+
|
| 69 |
+
Args:
|
| 70 |
+
in_tensors: A list of `Tensor` objects.
|
| 71 |
+
num_batch_threads: An `int`.
|
| 72 |
+
max_batch_size: An `int`.
|
| 73 |
+
batch_timeout_micros: An `int`.
|
| 74 |
+
grad_timeout_micros: An `int`.
|
| 75 |
+
max_enqueued_batches: An optional `int`. Defaults to `10`.
|
| 76 |
+
allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
|
| 77 |
+
container: An optional `string`. Defaults to `""`.
|
| 78 |
+
shared_name: An optional `string`. Defaults to `""`.
|
| 79 |
+
batching_queue: An optional `string`. Defaults to `""`.
|
| 80 |
+
name: A name for the operation (optional).
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
A tuple of `Tensor` objects (batched_tensors, batch_index, id).
|
| 84 |
+
|
| 85 |
+
batched_tensors: A list of `Tensor` objects. Has the same type as `in_tensors`.
|
| 86 |
+
batch_index: A `Tensor` of type `int64`.
|
| 87 |
+
id: A `Tensor` of type `int64`.
|
| 88 |
+
"""
|
| 89 |
+
_ctx = _context._context or _context.context()
|
| 90 |
+
tld = _ctx._thread_local_data
|
| 91 |
+
if tld.is_eager:
|
| 92 |
+
try:
|
| 93 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 94 |
+
_ctx, "Batch", name, in_tensors, "num_batch_threads",
|
| 95 |
+
num_batch_threads, "max_batch_size", max_batch_size,
|
| 96 |
+
"max_enqueued_batches", max_enqueued_batches, "batch_timeout_micros",
|
| 97 |
+
batch_timeout_micros, "allowed_batch_sizes", allowed_batch_sizes,
|
| 98 |
+
"grad_timeout_micros", grad_timeout_micros, "container", container,
|
| 99 |
+
"shared_name", shared_name, "batching_queue", batching_queue)
|
| 100 |
+
_result = _BatchOutput._make(_result)
|
| 101 |
+
return _result
|
| 102 |
+
except _core._NotOkStatusException as e:
|
| 103 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 104 |
+
except _core._FallbackException:
|
| 105 |
+
pass
|
| 106 |
+
try:
|
| 107 |
+
return batch_eager_fallback(
|
| 108 |
+
in_tensors, num_batch_threads=num_batch_threads,
|
| 109 |
+
max_batch_size=max_batch_size,
|
| 110 |
+
max_enqueued_batches=max_enqueued_batches,
|
| 111 |
+
batch_timeout_micros=batch_timeout_micros,
|
| 112 |
+
allowed_batch_sizes=allowed_batch_sizes,
|
| 113 |
+
grad_timeout_micros=grad_timeout_micros, container=container,
|
| 114 |
+
shared_name=shared_name, batching_queue=batching_queue, name=name,
|
| 115 |
+
ctx=_ctx)
|
| 116 |
+
except _core._SymbolicException:
|
| 117 |
+
pass # Add nodes to the TensorFlow graph.
|
| 118 |
+
# Add nodes to the TensorFlow graph.
|
| 119 |
+
num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
|
| 120 |
+
max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
|
| 121 |
+
batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
|
| 122 |
+
grad_timeout_micros = _execute.make_int(grad_timeout_micros, "grad_timeout_micros")
|
| 123 |
+
if max_enqueued_batches is None:
|
| 124 |
+
max_enqueued_batches = 10
|
| 125 |
+
max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
|
| 126 |
+
if allowed_batch_sizes is None:
|
| 127 |
+
allowed_batch_sizes = []
|
| 128 |
+
if not isinstance(allowed_batch_sizes, (list, tuple)):
|
| 129 |
+
raise TypeError(
|
| 130 |
+
"Expected list for 'allowed_batch_sizes' argument to "
|
| 131 |
+
"'batch' Op, not %r." % allowed_batch_sizes)
|
| 132 |
+
allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
|
| 133 |
+
if container is None:
|
| 134 |
+
container = ""
|
| 135 |
+
container = _execute.make_str(container, "container")
|
| 136 |
+
if shared_name is None:
|
| 137 |
+
shared_name = ""
|
| 138 |
+
shared_name = _execute.make_str(shared_name, "shared_name")
|
| 139 |
+
if batching_queue is None:
|
| 140 |
+
batching_queue = ""
|
| 141 |
+
batching_queue = _execute.make_str(batching_queue, "batching_queue")
|
| 142 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 143 |
+
"Batch", in_tensors=in_tensors, num_batch_threads=num_batch_threads,
|
| 144 |
+
max_batch_size=max_batch_size,
|
| 145 |
+
batch_timeout_micros=batch_timeout_micros,
|
| 146 |
+
grad_timeout_micros=grad_timeout_micros,
|
| 147 |
+
max_enqueued_batches=max_enqueued_batches,
|
| 148 |
+
allowed_batch_sizes=allowed_batch_sizes, container=container,
|
| 149 |
+
shared_name=shared_name, batching_queue=batching_queue,
|
| 150 |
+
name=name)
|
| 151 |
+
_result = _outputs[:]
|
| 152 |
+
if _execute.must_record_gradient():
|
| 153 |
+
_attrs = ("num_batch_threads", _op._get_attr_int("num_batch_threads"),
|
| 154 |
+
"max_batch_size", _op._get_attr_int("max_batch_size"),
|
| 155 |
+
"max_enqueued_batches",
|
| 156 |
+
_op._get_attr_int("max_enqueued_batches"),
|
| 157 |
+
"batch_timeout_micros",
|
| 158 |
+
_op._get_attr_int("batch_timeout_micros"),
|
| 159 |
+
"allowed_batch_sizes", _op.get_attr("allowed_batch_sizes"),
|
| 160 |
+
"grad_timeout_micros", _op._get_attr_int("grad_timeout_micros"),
|
| 161 |
+
"container", _op.get_attr("container"), "shared_name",
|
| 162 |
+
_op.get_attr("shared_name"), "batching_queue",
|
| 163 |
+
_op.get_attr("batching_queue"), "T", _op.get_attr("T"))
|
| 164 |
+
_inputs_flat = _op.inputs
|
| 165 |
+
_execute.record_gradient(
|
| 166 |
+
"Batch", _inputs_flat, _attrs, _result)
|
| 167 |
+
_result = [_result[:len(in_tensors)]] + _result[len(in_tensors):]
|
| 168 |
+
_result = _BatchOutput._make(_result)
|
| 169 |
+
return _result
|
| 170 |
+
|
| 171 |
+
Batch = tf_export("raw_ops.Batch")(_ops.to_raw_op(batch))
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def batch_eager_fallback(in_tensors, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, grad_timeout_micros: int, max_enqueued_batches: int, allowed_batch_sizes, container: str, shared_name: str, batching_queue: str, name, ctx):
  """Slow-path eager executor for the Batch op.

  Called by `batch` when the C fast path raises _FallbackException.
  Validates/normalizes every attribute in Python, converts the inputs to
  eager tensors, then dispatches the op through `_execute.execute`.

  Args mirror `batch`; `ctx` is the eager context to execute in.

  Returns:
    A `_BatchOutput` namedtuple of (batched_tensors, batch_index, id).
  """
  # Coerce/validate scalar attrs; make_int raises TypeError on bad values.
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  grad_timeout_micros = _execute.make_int(grad_timeout_micros, "grad_timeout_micros")
  # Optional attrs: substitute the op-def defaults when None was passed.
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  # Inputs may have heterogeneous dtypes; "T" records the per-tensor types.
  _attr_T, in_tensors = _execute.convert_to_mixed_eager_tensors(in_tensors, ctx)
  _inputs_flat = list(in_tensors)
  # Flat (key, value, key, value, ...) attr tuple, in the order the runtime
  # expects for the "Batch" op.
  _attrs = ("num_batch_threads", num_batch_threads, "max_batch_size",
  max_batch_size, "max_enqueued_batches", max_enqueued_batches,
  "batch_timeout_micros", batch_timeout_micros, "allowed_batch_sizes",
  allowed_batch_sizes, "grad_timeout_micros", grad_timeout_micros,
  "container", container, "shared_name", shared_name, "batching_queue",
  batching_queue, "T", _attr_T)
  # Batch produces one output per input tensor plus batch_index and id,
  # hence num_outputs = len(in_tensors) + 2.
  _result = _execute.execute(b"Batch", len(in_tensors) + 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Batch", _inputs_flat, _attrs, _result)
  # Regroup the flat results: first len(in_tensors) outputs form one list.
  _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):]
  _result = _BatchOutput._make(_result)
  return _result
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def batch_function(in_tensors, captured_tensors, f, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, Tout, max_enqueued_batches:int=10, allowed_batch_sizes=[], container:str="", shared_name:str="", batching_queue:str="", low_priority_max_batch_size:int=0, low_priority_batch_timeout_micros:int=0, low_priority_allowed_batch_sizes=[], low_priority_max_enqueued_batches:int=0, enable_large_batch_splitting:bool=False, name=None):
  r"""Batches all the inputs tensors to the computation done by the function.

  So, for example, in the following code

  ```python

  # This input will be captured.
  y = tf.placeholder_with_default(1.0, shape=[])

  @tf.Defun(tf.float32)
  def computation(a):
    return tf.matmul(a, a) + y

  b = gen_batch_ops.batch_function(
          f=computation
          in_tensors=[a],
          captured_tensors=computation.captured_inputs,
          Tout=[o.type for o in computation.definition.signature.output_arg],
          num_batch_threads=1,
          max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          batching_queue="")
  ```

  If more than one session.run call is simultaneously trying to compute `b`
  the values of `a` will be gathered, non-deterministically concatenated
  along the first axis, and only one thread will run the computation.

  Assumes that all arguments of the function are Tensors which will be batched
  along their first dimension.

  Arguments that are captured, are not batched. The session.run call which does
  the concatenation, will use the values of the captured tensors available to it.
  Therefore, typical uses of captured tensors should involve values which remain
  unchanged across session.run calls. Inference is a good example of this.

  SparseTensor is not supported. The return value of the decorated function
  must be a Tensor or a list/tuple of Tensors.

  Args:
    in_tensors: A list of `Tensor` objects. The tensors to be batched.
    captured_tensors: A list of `Tensor` objects.
      The tensors which are captured in the function, and don't need
      to be batched.
    f: A function decorated with @Defun.
    num_batch_threads: An `int`.
      Number of scheduling threads for processing batches of work.
      Determines the number of batches processed in parallel.
    max_batch_size: An `int`. Batch sizes will never be bigger than this.
    batch_timeout_micros: An `int`.
      Maximum number of microseconds to wait before outputting
      an incomplete batch.
    Tout: A list of `tf.DTypes` that has length `>= 1`.
      the types of the output tensors.
    max_enqueued_batches: An optional `int`. Defaults to `10`.
      Maximum number of batches enqueued. Default: 10.
    allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
      Optional list of allowed batch sizes. If left empty, does
      nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
      batches up to one of those sizes. The entries must increase monotonically.
      If enable_large_batch_splitting is false (i.e., large-input-split is not
      enabled) the final entry must equal max_batch_size.
    container: An optional `string`. Defaults to `""`.
      Controls the scope of sharing of this batch.
    shared_name: An optional `string`. Defaults to `""`.
      Concurrently running instances of batch in the same device with the
      same container and shared_name will batch their elements together. If left
      empty, the op name will be used as the shared name.
    batching_queue: An optional `string`. Defaults to `""`.
    low_priority_max_batch_size: An optional `int`. Defaults to `0`.
    low_priority_batch_timeout_micros: An optional `int`. Defaults to `0`.
    low_priority_allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
    low_priority_max_enqueued_batches: An optional `int`. Defaults to `0`.
    enable_large_batch_splitting: An optional `bool`. Defaults to `False`.
      input with a large size (i.e., larger than the largest value of
      `allowed_batch_sizes`) will be splitted into multiple batches with batch size.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager path: attempt the C fast path first; on _FallbackException fall
    # back to the Python slow path below.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchFunction", name, in_tensors, captured_tensors, "f", f,
        "num_batch_threads", num_batch_threads, "max_batch_size",
        max_batch_size, "batch_timeout_micros", batch_timeout_micros,
        "max_enqueued_batches", max_enqueued_batches, "allowed_batch_sizes",
        allowed_batch_sizes, "container", container, "shared_name",
        shared_name, "batching_queue", batching_queue,
        "low_priority_max_batch_size", low_priority_max_batch_size,
        "low_priority_batch_timeout_micros",
        low_priority_batch_timeout_micros, "low_priority_allowed_batch_sizes",
        low_priority_allowed_batch_sizes, "low_priority_max_enqueued_batches",
        low_priority_max_enqueued_batches, "Tout", Tout,
        "enable_large_batch_splitting", enable_large_batch_splitting)
      return _result
    except _core._NotOkStatusException as e:
      # Re-raise runtime errors with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return batch_function_eager_fallback(
          in_tensors, captured_tensors, f=f,
          num_batch_threads=num_batch_threads, max_batch_size=max_batch_size,
          batch_timeout_micros=batch_timeout_micros,
          max_enqueued_batches=max_enqueued_batches,
          allowed_batch_sizes=allowed_batch_sizes, container=container,
          shared_name=shared_name, batching_queue=batching_queue,
          low_priority_max_batch_size=low_priority_max_batch_size,
          low_priority_batch_timeout_micros=low_priority_batch_timeout_micros,
          low_priority_allowed_batch_sizes=low_priority_allowed_batch_sizes,
          low_priority_max_enqueued_batches=low_priority_max_enqueued_batches,
          Tout=Tout,
          enable_large_batch_splitting=enable_large_batch_splitting,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph-mode path: validate attrs in Python, then build a graph node.
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'batch_function' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  # Optional attrs default to the op-def defaults when None is passed.
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  if low_priority_max_batch_size is None:
    low_priority_max_batch_size = 0
  low_priority_max_batch_size = _execute.make_int(low_priority_max_batch_size, "low_priority_max_batch_size")
  if low_priority_batch_timeout_micros is None:
    low_priority_batch_timeout_micros = 0
  low_priority_batch_timeout_micros = _execute.make_int(low_priority_batch_timeout_micros, "low_priority_batch_timeout_micros")
  if low_priority_allowed_batch_sizes is None:
    low_priority_allowed_batch_sizes = []
  if not isinstance(low_priority_allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'low_priority_allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % low_priority_allowed_batch_sizes)
  low_priority_allowed_batch_sizes = [_execute.make_int(_i, "low_priority_allowed_batch_sizes") for _i in low_priority_allowed_batch_sizes]
  if low_priority_max_enqueued_batches is None:
    low_priority_max_enqueued_batches = 0
  low_priority_max_enqueued_batches = _execute.make_int(low_priority_max_enqueued_batches, "low_priority_max_enqueued_batches")
  if enable_large_batch_splitting is None:
    enable_large_batch_splitting = False
  enable_large_batch_splitting = _execute.make_bool(enable_large_batch_splitting, "enable_large_batch_splitting")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchFunction", in_tensors=in_tensors,
                         captured_tensors=captured_tensors, f=f,
                         num_batch_threads=num_batch_threads,
                         max_batch_size=max_batch_size,
                         batch_timeout_micros=batch_timeout_micros, Tout=Tout,
                         max_enqueued_batches=max_enqueued_batches,
                         allowed_batch_sizes=allowed_batch_sizes,
                         container=container, shared_name=shared_name,
                         batching_queue=batching_queue,
                         low_priority_max_batch_size=low_priority_max_batch_size,
                         low_priority_batch_timeout_micros=low_priority_batch_timeout_micros,
                         low_priority_allowed_batch_sizes=low_priority_allowed_batch_sizes,
                         low_priority_max_enqueued_batches=low_priority_max_enqueued_batches,
                         enable_large_batch_splitting=enable_large_batch_splitting,
                         name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back off the created op so the recorded gradient sees
    # exactly what the runtime will use.
    _attrs = ("f", _op.get_attr("f"), "num_batch_threads",
              _op._get_attr_int("num_batch_threads"), "max_batch_size",
              _op._get_attr_int("max_batch_size"), "batch_timeout_micros",
              _op._get_attr_int("batch_timeout_micros"),
              "max_enqueued_batches",
              _op._get_attr_int("max_enqueued_batches"),
              "allowed_batch_sizes", _op.get_attr("allowed_batch_sizes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "batching_queue",
              _op.get_attr("batching_queue"), "low_priority_max_batch_size",
              _op._get_attr_int("low_priority_max_batch_size"),
              "low_priority_batch_timeout_micros",
              _op._get_attr_int("low_priority_batch_timeout_micros"),
              "low_priority_allowed_batch_sizes",
              _op.get_attr("low_priority_allowed_batch_sizes"),
              "low_priority_max_enqueued_batches",
              _op._get_attr_int("low_priority_max_enqueued_batches"), "Tin",
              _op.get_attr("Tin"), "Tcaptured", _op.get_attr("Tcaptured"),
              "Tout", _op.get_attr("Tout"), "enable_large_batch_splitting",
              _op._get_attr_bool("enable_large_batch_splitting"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchFunction", _inputs_flat, _attrs, _result)
  return _result
|
| 428 |
+
|
| 429 |
+
# Raw-op export: makes the generated wrapper callable as tf.raw_ops.BatchFunction.
BatchFunction = tf_export("raw_ops.BatchFunction")(_ops.to_raw_op(batch_function))
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def batch_function_eager_fallback(in_tensors, captured_tensors, f, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, Tout, max_enqueued_batches: int, allowed_batch_sizes, container: str, shared_name: str, batching_queue: str, low_priority_max_batch_size: int, low_priority_batch_timeout_micros: int, low_priority_allowed_batch_sizes, low_priority_max_enqueued_batches: int, enable_large_batch_splitting: bool, name, ctx):
  """Slow-path eager executor for the BatchFunction op.

  Called by `batch_function` when the C fast path raises
  _FallbackException. Performs the same attribute validation as the
  graph-mode path, converts inputs to eager tensors, then dispatches via
  `_execute.execute`.

  Args mirror `batch_function`; `ctx` is the eager context to execute in.

  Returns:
    A list of eager `Tensor`s, one per entry of `Tout`.
  """
  # Required int attrs; make_int raises TypeError on bad values.
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'batch_function' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  # Optional attrs: substitute the op-def defaults when None was passed.
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  if low_priority_max_batch_size is None:
    low_priority_max_batch_size = 0
  low_priority_max_batch_size = _execute.make_int(low_priority_max_batch_size, "low_priority_max_batch_size")
  if low_priority_batch_timeout_micros is None:
    low_priority_batch_timeout_micros = 0
  low_priority_batch_timeout_micros = _execute.make_int(low_priority_batch_timeout_micros, "low_priority_batch_timeout_micros")
  if low_priority_allowed_batch_sizes is None:
    low_priority_allowed_batch_sizes = []
  if not isinstance(low_priority_allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'low_priority_allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % low_priority_allowed_batch_sizes)
  low_priority_allowed_batch_sizes = [_execute.make_int(_i, "low_priority_allowed_batch_sizes") for _i in low_priority_allowed_batch_sizes]
  if low_priority_max_enqueued_batches is None:
    low_priority_max_enqueued_batches = 0
  low_priority_max_enqueued_batches = _execute.make_int(low_priority_max_enqueued_batches, "low_priority_max_enqueued_batches")
  if enable_large_batch_splitting is None:
    enable_large_batch_splitting = False
  enable_large_batch_splitting = _execute.make_bool(enable_large_batch_splitting, "enable_large_batch_splitting")
  # Batched and captured inputs may each mix dtypes; "Tin"/"Tcaptured"
  # record the per-tensor types.
  _attr_Tin, in_tensors = _execute.convert_to_mixed_eager_tensors(in_tensors, ctx)
  _attr_Tcaptured, captured_tensors = _execute.convert_to_mixed_eager_tensors(captured_tensors, ctx)
  _inputs_flat = list(in_tensors) + list(captured_tensors)
  # Flat (key, value, ...) attr tuple in the order the runtime expects.
  _attrs = ("f", f, "num_batch_threads", num_batch_threads, "max_batch_size",
  max_batch_size, "batch_timeout_micros", batch_timeout_micros,
  "max_enqueued_batches", max_enqueued_batches, "allowed_batch_sizes",
  allowed_batch_sizes, "container", container, "shared_name", shared_name,
  "batching_queue", batching_queue, "low_priority_max_batch_size",
  low_priority_max_batch_size, "low_priority_batch_timeout_micros",
  low_priority_batch_timeout_micros, "low_priority_allowed_batch_sizes",
  low_priority_allowed_batch_sizes, "low_priority_max_enqueued_batches",
  low_priority_max_enqueued_batches, "Tin", _attr_Tin, "Tcaptured",
  _attr_Tcaptured, "Tout", Tout, "enable_large_batch_splitting",
  enable_large_batch_splitting)
  # One output per Tout entry.
  _result = _execute.execute(b"BatchFunction", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchFunction", _inputs_flat, _attrs, _result)
  return _result
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
# TypeVar enumerating every dtype the Unbatch op's "T" attr accepts; used in
# the Annotated signatures of unbatch / unbatch_eager_fallback below.
TV_Unbatch_T = TypeVar("TV_Unbatch_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant)
|
| 502 |
+
|
| 503 |
+
def unbatch(batched_tensor: Annotated[Any, TV_Unbatch_T], batch_index: Annotated[Any, _atypes.Int64], id: Annotated[Any, _atypes.Int64], timeout_micros: int, container:str="", shared_name:str="", name=None) -> Annotated[Any, TV_Unbatch_T]:
  r"""Reverses the operation of Batch for a single output Tensor.

  An instance of Unbatch either receives an empty batched_tensor, in which case it
  asynchronously waits until the values become available from a concurrently
  running instance of Unbatch with the same container and shared_name, or receives
  a non-empty batched_tensor in which case it finalizes all other concurrently
  running instances and outputs its own element from the batch.

  batched_tensor: The possibly transformed output of Batch. The size of the first
   dimension should remain unchanged by the transformations for the operation to
   work.
  batch_index: The matching batch_index obtained from Batch.
  id: The id scalar emitted by Batch.
  unbatched_tensor: The Tensor corresponding to this execution.
  timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
   batched input tensor associated with a given invocation of the op.
  container: Container to control resource sharing.
  shared_name: Instances of Unbatch with the same container and shared_name are
   assumed to possibly belong to the same batch. If left empty, the op name will
   be used as the shared name.

  Args:
    batched_tensor: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    id: A `Tensor` of type `int64`.
    timeout_micros: An `int`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `batched_tensor`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager path: C fast path first, Python fallback on _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Unbatch", name, batched_tensor, batch_index, id,
        "timeout_micros", timeout_micros, "container", container,
        "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return unbatch_eager_fallback(
          batched_tensor, batch_index, id, timeout_micros=timeout_micros,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph-mode path: validate attrs, then build a graph node.
  timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Unbatch", batched_tensor=batched_tensor, batch_index=batch_index,
                   id=id, timeout_micros=timeout_micros, container=container,
                   shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read attrs back off the created op for exact gradient bookkeeping.
    _attrs = ("timeout_micros", _op._get_attr_int("timeout_micros"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Unbatch", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
|
| 578 |
+
|
| 579 |
+
# Raw-op export: makes the generated wrapper callable as tf.raw_ops.Unbatch.
Unbatch = tf_export("raw_ops.Unbatch")(_ops.to_raw_op(unbatch))
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def unbatch_eager_fallback(batched_tensor: Annotated[Any, TV_Unbatch_T], batch_index: Annotated[Any, _atypes.Int64], id: Annotated[Any, _atypes.Int64], timeout_micros: int, container: str, shared_name: str, name, ctx) -> Annotated[Any, TV_Unbatch_T]:
  """Slow-path eager executor for the Unbatch op.

  Called by `unbatch` when the C fast path raises _FallbackException.
  Args mirror `unbatch`; `ctx` is the eager context to execute in.

  Returns:
    A single eager `Tensor` with the same dtype as `batched_tensor`.
  """
  timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
  # Optional string attrs default to "".
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # Infer the "T" attr from the batched tensor's dtype.
  _attr_T, (batched_tensor,) = _execute.args_to_matching_eager([batched_tensor], ctx, [])
  batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
  id = _ops.convert_to_tensor(id, _dtypes.int64)
  _inputs_flat = [batched_tensor, batch_index, id]
  _attrs = ("timeout_micros", timeout_micros, "container", container,
  "shared_name", shared_name, "T", _attr_T)
  _result = _execute.execute(b"Unbatch", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Unbatch", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
# TypeVar enumerating every dtype the UnbatchGrad op's "T" attr accepts; used
# in the Annotated signatures of unbatch_grad / unbatch_grad_eager_fallback.
TV_UnbatchGrad_T = TypeVar("TV_UnbatchGrad_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant)
|
| 606 |
+
|
| 607 |
+
def unbatch_grad(original_input: Annotated[Any, TV_UnbatchGrad_T], batch_index: Annotated[Any, _atypes.Int64], grad: Annotated[Any, TV_UnbatchGrad_T], id: Annotated[Any, _atypes.Int64], container:str="", shared_name:str="", name=None) -> Annotated[Any, TV_UnbatchGrad_T]:
  r"""Gradient of Unbatch.

  Acts like Batch but using the given batch_index index of batching things as they
  become available. This ensures that the gradients are propagated back in the
  same session which did the forward pass.

  original_input: The input to the Unbatch operation this is the gradient of.
  batch_index: The batch_index given to the Unbatch operation this is the gradient
  of.
  grad: The downstream gradient.
  id: The id scalar emitted by Batch.
  batched_grad: The return value, either an empty tensor or the batched gradient.
  container: Container to control resource sharing.
  shared_name: Instances of UnbatchGrad with the same container and shared_name
   are assumed to possibly belong to the same batch. If left empty, the op name
   will be used as the shared name.

  Args:
    original_input: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    grad: A `Tensor`. Must have the same type as `original_input`.
    id: A `Tensor` of type `int64`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `original_input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager path: C fast path first, Python fallback on _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "UnbatchGrad", name, original_input, batch_index, grad, id,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return unbatch_grad_eager_fallback(
          original_input, batch_index, grad, id, container=container,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph-mode path: validate attrs, then build a graph node.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "UnbatchGrad", original_input=original_input, batch_index=batch_index,
                       grad=grad, id=id, container=container,
                       shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read attrs back off the created op for exact gradient bookkeeping.
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "UnbatchGrad", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
|
| 675 |
+
|
| 676 |
+
# Raw-op export: makes the generated wrapper callable as tf.raw_ops.UnbatchGrad.
UnbatchGrad = tf_export("raw_ops.UnbatchGrad")(_ops.to_raw_op(unbatch_grad))
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
def unbatch_grad_eager_fallback(original_input: Annotated[Any, TV_UnbatchGrad_T], batch_index: Annotated[Any, _atypes.Int64], grad: Annotated[Any, TV_UnbatchGrad_T], id: Annotated[Any, _atypes.Int64], container: str, shared_name: str, name, ctx) -> Annotated[Any, TV_UnbatchGrad_T]:
  """Slow-path eager executor for the UnbatchGrad op.

  Called by `unbatch_grad` when the C fast path raises _FallbackException.
  Args mirror `unbatch_grad`; `ctx` is the eager context to execute in.

  Returns:
    A single eager `Tensor` with the same dtype as `original_input`.
  """
  # Optional string attrs default to "".
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # original_input and grad share the "T" attr, so they are matched together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([original_input, grad], ctx, [])
  (original_input, grad) = _inputs_T
  batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
  id = _ops.convert_to_tensor(id, _dtypes.int64)
  _inputs_flat = [original_input, batch_index, grad, id]
  _attrs = ("container", container, "shared_name", shared_name, "T", _attr_T)
  _result = _execute.execute(b"UnbatchGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "UnbatchGrad", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
|
| 699 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_bitwise_ops.py
ADDED
|
@@ -0,0 +1,765 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Python wrappers around TensorFlow ops.
|
| 2 |
+
|
| 3 |
+
This file is MACHINE GENERATED! Do not edit.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
|
| 8 |
+
from tensorflow.python import pywrap_tfe as pywrap_tfe
|
| 9 |
+
from tensorflow.python.eager import context as _context
|
| 10 |
+
from tensorflow.python.eager import core as _core
|
| 11 |
+
from tensorflow.python.eager import execute as _execute
|
| 12 |
+
from tensorflow.python.framework import dtypes as _dtypes
|
| 13 |
+
from tensorflow.security.fuzzing.py import annotation_types as _atypes
|
| 14 |
+
|
| 15 |
+
from tensorflow.python.framework import op_def_registry as _op_def_registry
|
| 16 |
+
from tensorflow.python.framework import ops as _ops
|
| 17 |
+
from tensorflow.python.framework import op_def_library as _op_def_library
|
| 18 |
+
from tensorflow.python.util.deprecation import deprecated_endpoints
|
| 19 |
+
from tensorflow.python.util import dispatch as _dispatch
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
from typing import TypeVar, List, Any
|
| 23 |
+
from typing_extensions import Annotated
|
| 24 |
+
|
| 25 |
+
TV_BitwiseAnd_T = TypeVar("TV_BitwiseAnd_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 26 |
+
|
| 27 |
+
@_dispatch.add_fallback_dispatch_list
|
| 28 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 29 |
+
@tf_export('bitwise.bitwise_and')
|
| 30 |
+
def bitwise_and(x: Annotated[Any, TV_BitwiseAnd_T], y: Annotated[Any, TV_BitwiseAnd_T], name=None) -> Annotated[Any, TV_BitwiseAnd_T]:
|
| 31 |
+
r"""Elementwise computes the bitwise AND of `x` and `y`.
|
| 32 |
+
|
| 33 |
+
The result will have those bits set, that are set in both `x` and `y`. The
|
| 34 |
+
computation is performed on the underlying representations of `x` and `y`.
|
| 35 |
+
|
| 36 |
+
For example:
|
| 37 |
+
|
| 38 |
+
```python
|
| 39 |
+
import tensorflow as tf
|
| 40 |
+
from tensorflow.python.ops import bitwise_ops
|
| 41 |
+
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
|
| 42 |
+
tf.uint8, tf.uint16, tf.uint32, tf.uint64]
|
| 43 |
+
|
| 44 |
+
for dtype in dtype_list:
|
| 45 |
+
lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
|
| 46 |
+
rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
|
| 47 |
+
exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
|
| 48 |
+
|
| 49 |
+
res = bitwise_ops.bitwise_and(lhs, rhs)
|
| 50 |
+
tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 55 |
+
y: A `Tensor`. Must have the same type as `x`.
|
| 56 |
+
name: A name for the operation (optional).
|
| 57 |
+
|
| 58 |
+
Returns:
|
| 59 |
+
A `Tensor`. Has the same type as `x`.
|
| 60 |
+
"""
|
| 61 |
+
_ctx = _context._context or _context.context()
|
| 62 |
+
tld = _ctx._thread_local_data
|
| 63 |
+
if tld.is_eager:
|
| 64 |
+
try:
|
| 65 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 66 |
+
_ctx, "BitwiseAnd", name, x, y)
|
| 67 |
+
return _result
|
| 68 |
+
except _core._NotOkStatusException as e:
|
| 69 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 70 |
+
except _core._FallbackException:
|
| 71 |
+
pass
|
| 72 |
+
try:
|
| 73 |
+
_result = _dispatcher_for_bitwise_and(
|
| 74 |
+
(x, y, name,), None)
|
| 75 |
+
if _result is not NotImplemented:
|
| 76 |
+
return _result
|
| 77 |
+
return bitwise_and_eager_fallback(
|
| 78 |
+
x, y, name=name, ctx=_ctx)
|
| 79 |
+
except _core._SymbolicException:
|
| 80 |
+
pass # Add nodes to the TensorFlow graph.
|
| 81 |
+
except (TypeError, ValueError):
|
| 82 |
+
_result = _dispatch.dispatch(
|
| 83 |
+
bitwise_and, (), dict(x=x, y=y, name=name)
|
| 84 |
+
)
|
| 85 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 86 |
+
return _result
|
| 87 |
+
raise
|
| 88 |
+
else:
|
| 89 |
+
_result = _dispatcher_for_bitwise_and(
|
| 90 |
+
(x, y, name,), None)
|
| 91 |
+
if _result is not NotImplemented:
|
| 92 |
+
return _result
|
| 93 |
+
# Add nodes to the TensorFlow graph.
|
| 94 |
+
try:
|
| 95 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 96 |
+
"BitwiseAnd", x=x, y=y, name=name)
|
| 97 |
+
except (TypeError, ValueError):
|
| 98 |
+
_result = _dispatch.dispatch(
|
| 99 |
+
bitwise_and, (), dict(x=x, y=y, name=name)
|
| 100 |
+
)
|
| 101 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 102 |
+
return _result
|
| 103 |
+
raise
|
| 104 |
+
_result = _outputs[:]
|
| 105 |
+
if _execute.must_record_gradient():
|
| 106 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 107 |
+
_inputs_flat = _op.inputs
|
| 108 |
+
_execute.record_gradient(
|
| 109 |
+
"BitwiseAnd", _inputs_flat, _attrs, _result)
|
| 110 |
+
_result, = _result
|
| 111 |
+
return _result
|
| 112 |
+
|
| 113 |
+
BitwiseAnd = tf_export("raw_ops.BitwiseAnd")(_ops.to_raw_op(bitwise_and))
|
| 114 |
+
_dispatcher_for_bitwise_and = bitwise_and._tf_type_based_dispatcher.Dispatch
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def bitwise_and_eager_fallback(x: Annotated[Any, TV_BitwiseAnd_T], y: Annotated[Any, TV_BitwiseAnd_T], name, ctx) -> Annotated[Any, TV_BitwiseAnd_T]:
|
| 118 |
+
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 119 |
+
(x, y) = _inputs_T
|
| 120 |
+
_inputs_flat = [x, y]
|
| 121 |
+
_attrs = ("T", _attr_T)
|
| 122 |
+
_result = _execute.execute(b"BitwiseAnd", 1, inputs=_inputs_flat,
|
| 123 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 124 |
+
if _execute.must_record_gradient():
|
| 125 |
+
_execute.record_gradient(
|
| 126 |
+
"BitwiseAnd", _inputs_flat, _attrs, _result)
|
| 127 |
+
_result, = _result
|
| 128 |
+
return _result
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
TV_BitwiseOr_T = TypeVar("TV_BitwiseOr_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 132 |
+
|
| 133 |
+
@_dispatch.add_fallback_dispatch_list
|
| 134 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 135 |
+
@tf_export('bitwise.bitwise_or')
|
| 136 |
+
def bitwise_or(x: Annotated[Any, TV_BitwiseOr_T], y: Annotated[Any, TV_BitwiseOr_T], name=None) -> Annotated[Any, TV_BitwiseOr_T]:
|
| 137 |
+
r"""Elementwise computes the bitwise OR of `x` and `y`.
|
| 138 |
+
|
| 139 |
+
The result will have those bits set, that are set in `x`, `y` or both. The
|
| 140 |
+
computation is performed on the underlying representations of `x` and `y`.
|
| 141 |
+
|
| 142 |
+
For example:
|
| 143 |
+
|
| 144 |
+
```python
|
| 145 |
+
import tensorflow as tf
|
| 146 |
+
from tensorflow.python.ops import bitwise_ops
|
| 147 |
+
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
|
| 148 |
+
tf.uint8, tf.uint16, tf.uint32, tf.uint64]
|
| 149 |
+
|
| 150 |
+
for dtype in dtype_list:
|
| 151 |
+
lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
|
| 152 |
+
rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
|
| 153 |
+
exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
|
| 154 |
+
|
| 155 |
+
res = bitwise_ops.bitwise_or(lhs, rhs)
|
| 156 |
+
tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 161 |
+
y: A `Tensor`. Must have the same type as `x`.
|
| 162 |
+
name: A name for the operation (optional).
|
| 163 |
+
|
| 164 |
+
Returns:
|
| 165 |
+
A `Tensor`. Has the same type as `x`.
|
| 166 |
+
"""
|
| 167 |
+
_ctx = _context._context or _context.context()
|
| 168 |
+
tld = _ctx._thread_local_data
|
| 169 |
+
if tld.is_eager:
|
| 170 |
+
try:
|
| 171 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 172 |
+
_ctx, "BitwiseOr", name, x, y)
|
| 173 |
+
return _result
|
| 174 |
+
except _core._NotOkStatusException as e:
|
| 175 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 176 |
+
except _core._FallbackException:
|
| 177 |
+
pass
|
| 178 |
+
try:
|
| 179 |
+
_result = _dispatcher_for_bitwise_or(
|
| 180 |
+
(x, y, name,), None)
|
| 181 |
+
if _result is not NotImplemented:
|
| 182 |
+
return _result
|
| 183 |
+
return bitwise_or_eager_fallback(
|
| 184 |
+
x, y, name=name, ctx=_ctx)
|
| 185 |
+
except _core._SymbolicException:
|
| 186 |
+
pass # Add nodes to the TensorFlow graph.
|
| 187 |
+
except (TypeError, ValueError):
|
| 188 |
+
_result = _dispatch.dispatch(
|
| 189 |
+
bitwise_or, (), dict(x=x, y=y, name=name)
|
| 190 |
+
)
|
| 191 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 192 |
+
return _result
|
| 193 |
+
raise
|
| 194 |
+
else:
|
| 195 |
+
_result = _dispatcher_for_bitwise_or(
|
| 196 |
+
(x, y, name,), None)
|
| 197 |
+
if _result is not NotImplemented:
|
| 198 |
+
return _result
|
| 199 |
+
# Add nodes to the TensorFlow graph.
|
| 200 |
+
try:
|
| 201 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 202 |
+
"BitwiseOr", x=x, y=y, name=name)
|
| 203 |
+
except (TypeError, ValueError):
|
| 204 |
+
_result = _dispatch.dispatch(
|
| 205 |
+
bitwise_or, (), dict(x=x, y=y, name=name)
|
| 206 |
+
)
|
| 207 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 208 |
+
return _result
|
| 209 |
+
raise
|
| 210 |
+
_result = _outputs[:]
|
| 211 |
+
if _execute.must_record_gradient():
|
| 212 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 213 |
+
_inputs_flat = _op.inputs
|
| 214 |
+
_execute.record_gradient(
|
| 215 |
+
"BitwiseOr", _inputs_flat, _attrs, _result)
|
| 216 |
+
_result, = _result
|
| 217 |
+
return _result
|
| 218 |
+
|
| 219 |
+
BitwiseOr = tf_export("raw_ops.BitwiseOr")(_ops.to_raw_op(bitwise_or))
|
| 220 |
+
_dispatcher_for_bitwise_or = bitwise_or._tf_type_based_dispatcher.Dispatch
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def bitwise_or_eager_fallback(x: Annotated[Any, TV_BitwiseOr_T], y: Annotated[Any, TV_BitwiseOr_T], name, ctx) -> Annotated[Any, TV_BitwiseOr_T]:
|
| 224 |
+
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 225 |
+
(x, y) = _inputs_T
|
| 226 |
+
_inputs_flat = [x, y]
|
| 227 |
+
_attrs = ("T", _attr_T)
|
| 228 |
+
_result = _execute.execute(b"BitwiseOr", 1, inputs=_inputs_flat,
|
| 229 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 230 |
+
if _execute.must_record_gradient():
|
| 231 |
+
_execute.record_gradient(
|
| 232 |
+
"BitwiseOr", _inputs_flat, _attrs, _result)
|
| 233 |
+
_result, = _result
|
| 234 |
+
return _result
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
TV_BitwiseXor_T = TypeVar("TV_BitwiseXor_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 238 |
+
|
| 239 |
+
@_dispatch.add_fallback_dispatch_list
|
| 240 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 241 |
+
@tf_export('bitwise.bitwise_xor')
|
| 242 |
+
def bitwise_xor(x: Annotated[Any, TV_BitwiseXor_T], y: Annotated[Any, TV_BitwiseXor_T], name=None) -> Annotated[Any, TV_BitwiseXor_T]:
|
| 243 |
+
r"""Elementwise computes the bitwise XOR of `x` and `y`.
|
| 244 |
+
|
| 245 |
+
The result will have those bits set, that are different in `x` and `y`. The
|
| 246 |
+
computation is performed on the underlying representations of `x` and `y`.
|
| 247 |
+
|
| 248 |
+
For example:
|
| 249 |
+
|
| 250 |
+
```python
|
| 251 |
+
import tensorflow as tf
|
| 252 |
+
from tensorflow.python.ops import bitwise_ops
|
| 253 |
+
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
|
| 254 |
+
tf.uint8, tf.uint16, tf.uint32, tf.uint64]
|
| 255 |
+
|
| 256 |
+
for dtype in dtype_list:
|
| 257 |
+
lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
|
| 258 |
+
rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
|
| 259 |
+
exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)
|
| 260 |
+
|
| 261 |
+
res = bitwise_ops.bitwise_xor(lhs, rhs)
|
| 262 |
+
tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
|
| 263 |
+
```
|
| 264 |
+
|
| 265 |
+
Args:
|
| 266 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 267 |
+
y: A `Tensor`. Must have the same type as `x`.
|
| 268 |
+
name: A name for the operation (optional).
|
| 269 |
+
|
| 270 |
+
Returns:
|
| 271 |
+
A `Tensor`. Has the same type as `x`.
|
| 272 |
+
"""
|
| 273 |
+
_ctx = _context._context or _context.context()
|
| 274 |
+
tld = _ctx._thread_local_data
|
| 275 |
+
if tld.is_eager:
|
| 276 |
+
try:
|
| 277 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 278 |
+
_ctx, "BitwiseXor", name, x, y)
|
| 279 |
+
return _result
|
| 280 |
+
except _core._NotOkStatusException as e:
|
| 281 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 282 |
+
except _core._FallbackException:
|
| 283 |
+
pass
|
| 284 |
+
try:
|
| 285 |
+
_result = _dispatcher_for_bitwise_xor(
|
| 286 |
+
(x, y, name,), None)
|
| 287 |
+
if _result is not NotImplemented:
|
| 288 |
+
return _result
|
| 289 |
+
return bitwise_xor_eager_fallback(
|
| 290 |
+
x, y, name=name, ctx=_ctx)
|
| 291 |
+
except _core._SymbolicException:
|
| 292 |
+
pass # Add nodes to the TensorFlow graph.
|
| 293 |
+
except (TypeError, ValueError):
|
| 294 |
+
_result = _dispatch.dispatch(
|
| 295 |
+
bitwise_xor, (), dict(x=x, y=y, name=name)
|
| 296 |
+
)
|
| 297 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 298 |
+
return _result
|
| 299 |
+
raise
|
| 300 |
+
else:
|
| 301 |
+
_result = _dispatcher_for_bitwise_xor(
|
| 302 |
+
(x, y, name,), None)
|
| 303 |
+
if _result is not NotImplemented:
|
| 304 |
+
return _result
|
| 305 |
+
# Add nodes to the TensorFlow graph.
|
| 306 |
+
try:
|
| 307 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 308 |
+
"BitwiseXor", x=x, y=y, name=name)
|
| 309 |
+
except (TypeError, ValueError):
|
| 310 |
+
_result = _dispatch.dispatch(
|
| 311 |
+
bitwise_xor, (), dict(x=x, y=y, name=name)
|
| 312 |
+
)
|
| 313 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 314 |
+
return _result
|
| 315 |
+
raise
|
| 316 |
+
_result = _outputs[:]
|
| 317 |
+
if _execute.must_record_gradient():
|
| 318 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 319 |
+
_inputs_flat = _op.inputs
|
| 320 |
+
_execute.record_gradient(
|
| 321 |
+
"BitwiseXor", _inputs_flat, _attrs, _result)
|
| 322 |
+
_result, = _result
|
| 323 |
+
return _result
|
| 324 |
+
|
| 325 |
+
BitwiseXor = tf_export("raw_ops.BitwiseXor")(_ops.to_raw_op(bitwise_xor))
|
| 326 |
+
_dispatcher_for_bitwise_xor = bitwise_xor._tf_type_based_dispatcher.Dispatch
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def bitwise_xor_eager_fallback(x: Annotated[Any, TV_BitwiseXor_T], y: Annotated[Any, TV_BitwiseXor_T], name, ctx) -> Annotated[Any, TV_BitwiseXor_T]:
|
| 330 |
+
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 331 |
+
(x, y) = _inputs_T
|
| 332 |
+
_inputs_flat = [x, y]
|
| 333 |
+
_attrs = ("T", _attr_T)
|
| 334 |
+
_result = _execute.execute(b"BitwiseXor", 1, inputs=_inputs_flat,
|
| 335 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 336 |
+
if _execute.must_record_gradient():
|
| 337 |
+
_execute.record_gradient(
|
| 338 |
+
"BitwiseXor", _inputs_flat, _attrs, _result)
|
| 339 |
+
_result, = _result
|
| 340 |
+
return _result
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
TV_Invert_T = TypeVar("TV_Invert_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 344 |
+
|
| 345 |
+
@_dispatch.add_fallback_dispatch_list
|
| 346 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 347 |
+
@tf_export('bitwise.invert')
|
| 348 |
+
def invert(x: Annotated[Any, TV_Invert_T], name=None) -> Annotated[Any, TV_Invert_T]:
|
| 349 |
+
r"""Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
|
| 350 |
+
|
| 351 |
+
Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
|
| 352 |
+
This operation is performed on each element of the tensor argument `x`.
|
| 353 |
+
|
| 354 |
+
Example:
|
| 355 |
+
```python
|
| 356 |
+
import tensorflow as tf
|
| 357 |
+
from tensorflow.python.ops import bitwise_ops
|
| 358 |
+
|
| 359 |
+
# flip 2 (00000010) to -3 (11111101)
|
| 360 |
+
tf.assert_equal(-3, bitwise_ops.invert(2))
|
| 361 |
+
|
| 362 |
+
dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
|
| 363 |
+
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
|
| 364 |
+
|
| 365 |
+
inputs = [0, 5, 3, 14]
|
| 366 |
+
for dtype in dtype_list:
|
| 367 |
+
# Because of issues with negative numbers, let's test this indirectly.
|
| 368 |
+
# 1. invert(a) and a = 0
|
| 369 |
+
# 2. invert(a) or a = invert(0)
|
| 370 |
+
input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
|
| 371 |
+
not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
|
| 372 |
+
input_tensor, bitwise_ops.invert(input_tensor)),
|
| 373 |
+
bitwise_ops.bitwise_or(
|
| 374 |
+
input_tensor, bitwise_ops.invert(input_tensor)),
|
| 375 |
+
bitwise_ops.invert(
|
| 376 |
+
tf.constant(0, dtype=dtype))]
|
| 377 |
+
|
| 378 |
+
expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
|
| 379 |
+
tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
|
| 380 |
+
|
| 381 |
+
expected = tf.cast([not_0] * 4, tf.float32)
|
| 382 |
+
tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
|
| 383 |
+
|
| 384 |
+
# For unsigned dtypes let's also check the result directly.
|
| 385 |
+
if dtype.is_unsigned:
|
| 386 |
+
inverted = bitwise_ops.invert(input_tensor)
|
| 387 |
+
expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
|
| 388 |
+
tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
|
| 389 |
+
```
|
| 390 |
+
|
| 391 |
+
Args:
|
| 392 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 393 |
+
name: A name for the operation (optional).
|
| 394 |
+
|
| 395 |
+
Returns:
|
| 396 |
+
A `Tensor`. Has the same type as `x`.
|
| 397 |
+
"""
|
| 398 |
+
_ctx = _context._context or _context.context()
|
| 399 |
+
tld = _ctx._thread_local_data
|
| 400 |
+
if tld.is_eager:
|
| 401 |
+
try:
|
| 402 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 403 |
+
_ctx, "Invert", name, x)
|
| 404 |
+
return _result
|
| 405 |
+
except _core._NotOkStatusException as e:
|
| 406 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 407 |
+
except _core._FallbackException:
|
| 408 |
+
pass
|
| 409 |
+
try:
|
| 410 |
+
_result = _dispatcher_for_invert(
|
| 411 |
+
(x, name,), None)
|
| 412 |
+
if _result is not NotImplemented:
|
| 413 |
+
return _result
|
| 414 |
+
return invert_eager_fallback(
|
| 415 |
+
x, name=name, ctx=_ctx)
|
| 416 |
+
except _core._SymbolicException:
|
| 417 |
+
pass # Add nodes to the TensorFlow graph.
|
| 418 |
+
except (TypeError, ValueError):
|
| 419 |
+
_result = _dispatch.dispatch(
|
| 420 |
+
invert, (), dict(x=x, name=name)
|
| 421 |
+
)
|
| 422 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 423 |
+
return _result
|
| 424 |
+
raise
|
| 425 |
+
else:
|
| 426 |
+
_result = _dispatcher_for_invert(
|
| 427 |
+
(x, name,), None)
|
| 428 |
+
if _result is not NotImplemented:
|
| 429 |
+
return _result
|
| 430 |
+
# Add nodes to the TensorFlow graph.
|
| 431 |
+
try:
|
| 432 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 433 |
+
"Invert", x=x, name=name)
|
| 434 |
+
except (TypeError, ValueError):
|
| 435 |
+
_result = _dispatch.dispatch(
|
| 436 |
+
invert, (), dict(x=x, name=name)
|
| 437 |
+
)
|
| 438 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 439 |
+
return _result
|
| 440 |
+
raise
|
| 441 |
+
_result = _outputs[:]
|
| 442 |
+
if _execute.must_record_gradient():
|
| 443 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 444 |
+
_inputs_flat = _op.inputs
|
| 445 |
+
_execute.record_gradient(
|
| 446 |
+
"Invert", _inputs_flat, _attrs, _result)
|
| 447 |
+
_result, = _result
|
| 448 |
+
return _result
|
| 449 |
+
|
| 450 |
+
Invert = tf_export("raw_ops.Invert")(_ops.to_raw_op(invert))
|
| 451 |
+
_dispatcher_for_invert = invert._tf_type_based_dispatcher.Dispatch
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def invert_eager_fallback(x: Annotated[Any, TV_Invert_T], name, ctx) -> Annotated[Any, TV_Invert_T]:
|
| 455 |
+
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 456 |
+
_inputs_flat = [x]
|
| 457 |
+
_attrs = ("T", _attr_T)
|
| 458 |
+
_result = _execute.execute(b"Invert", 1, inputs=_inputs_flat, attrs=_attrs,
|
| 459 |
+
ctx=ctx, name=name)
|
| 460 |
+
if _execute.must_record_gradient():
|
| 461 |
+
_execute.record_gradient(
|
| 462 |
+
"Invert", _inputs_flat, _attrs, _result)
|
| 463 |
+
_result, = _result
|
| 464 |
+
return _result
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
TV_LeftShift_T = TypeVar("TV_LeftShift_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 468 |
+
|
| 469 |
+
@_dispatch.add_fallback_dispatch_list
|
| 470 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 471 |
+
@tf_export('bitwise.left_shift')
|
| 472 |
+
def left_shift(x: Annotated[Any, TV_LeftShift_T], y: Annotated[Any, TV_LeftShift_T], name=None) -> Annotated[Any, TV_LeftShift_T]:
|
| 473 |
+
r"""Elementwise computes the bitwise left-shift of `x` and `y`.
|
| 474 |
+
|
| 475 |
+
If `y` is negative, or greater than or equal to the width of `x` in bits the
|
| 476 |
+
result is implementation defined.
|
| 477 |
+
|
| 478 |
+
Example:
|
| 479 |
+
|
| 480 |
+
```python
|
| 481 |
+
import tensorflow as tf
|
| 482 |
+
from tensorflow.python.ops import bitwise_ops
|
| 483 |
+
import numpy as np
|
| 484 |
+
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
|
| 485 |
+
|
| 486 |
+
for dtype in dtype_list:
|
| 487 |
+
lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
|
| 488 |
+
rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
|
| 489 |
+
|
| 490 |
+
left_shift_result = bitwise_ops.left_shift(lhs, rhs)
|
| 491 |
+
|
| 492 |
+
print(left_shift_result)
|
| 493 |
+
|
| 494 |
+
# This will print:
|
| 495 |
+
# tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8)
|
| 496 |
+
# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16)
|
| 497 |
+
# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32)
|
| 498 |
+
# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64)
|
| 499 |
+
|
| 500 |
+
lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
|
| 501 |
+
rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
|
| 502 |
+
bitwise_ops.left_shift(lhs, rhs)
|
| 503 |
+
# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)>
|
| 504 |
+
```
|
| 505 |
+
|
| 506 |
+
Args:
|
| 507 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 508 |
+
y: A `Tensor`. Must have the same type as `x`.
|
| 509 |
+
name: A name for the operation (optional).
|
| 510 |
+
|
| 511 |
+
Returns:
|
| 512 |
+
A `Tensor`. Has the same type as `x`.
|
| 513 |
+
"""
|
| 514 |
+
_ctx = _context._context or _context.context()
|
| 515 |
+
tld = _ctx._thread_local_data
|
| 516 |
+
if tld.is_eager:
|
| 517 |
+
try:
|
| 518 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 519 |
+
_ctx, "LeftShift", name, x, y)
|
| 520 |
+
return _result
|
| 521 |
+
except _core._NotOkStatusException as e:
|
| 522 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 523 |
+
except _core._FallbackException:
|
| 524 |
+
pass
|
| 525 |
+
try:
|
| 526 |
+
_result = _dispatcher_for_left_shift(
|
| 527 |
+
(x, y, name,), None)
|
| 528 |
+
if _result is not NotImplemented:
|
| 529 |
+
return _result
|
| 530 |
+
return left_shift_eager_fallback(
|
| 531 |
+
x, y, name=name, ctx=_ctx)
|
| 532 |
+
except _core._SymbolicException:
|
| 533 |
+
pass # Add nodes to the TensorFlow graph.
|
| 534 |
+
except (TypeError, ValueError):
|
| 535 |
+
_result = _dispatch.dispatch(
|
| 536 |
+
left_shift, (), dict(x=x, y=y, name=name)
|
| 537 |
+
)
|
| 538 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 539 |
+
return _result
|
| 540 |
+
raise
|
| 541 |
+
else:
|
| 542 |
+
_result = _dispatcher_for_left_shift(
|
| 543 |
+
(x, y, name,), None)
|
| 544 |
+
if _result is not NotImplemented:
|
| 545 |
+
return _result
|
| 546 |
+
# Add nodes to the TensorFlow graph.
|
| 547 |
+
try:
|
| 548 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 549 |
+
"LeftShift", x=x, y=y, name=name)
|
| 550 |
+
except (TypeError, ValueError):
|
| 551 |
+
_result = _dispatch.dispatch(
|
| 552 |
+
left_shift, (), dict(x=x, y=y, name=name)
|
| 553 |
+
)
|
| 554 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 555 |
+
return _result
|
| 556 |
+
raise
|
| 557 |
+
_result = _outputs[:]
|
| 558 |
+
if _execute.must_record_gradient():
|
| 559 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 560 |
+
_inputs_flat = _op.inputs
|
| 561 |
+
_execute.record_gradient(
|
| 562 |
+
"LeftShift", _inputs_flat, _attrs, _result)
|
| 563 |
+
_result, = _result
|
| 564 |
+
return _result
|
| 565 |
+
|
| 566 |
+
LeftShift = tf_export("raw_ops.LeftShift")(_ops.to_raw_op(left_shift))
|
| 567 |
+
_dispatcher_for_left_shift = left_shift._tf_type_based_dispatcher.Dispatch
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def left_shift_eager_fallback(x: Annotated[Any, TV_LeftShift_T], y: Annotated[Any, TV_LeftShift_T], name, ctx) -> Annotated[Any, TV_LeftShift_T]:
|
| 571 |
+
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 572 |
+
(x, y) = _inputs_T
|
| 573 |
+
_inputs_flat = [x, y]
|
| 574 |
+
_attrs = ("T", _attr_T)
|
| 575 |
+
_result = _execute.execute(b"LeftShift", 1, inputs=_inputs_flat,
|
| 576 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 577 |
+
if _execute.must_record_gradient():
|
| 578 |
+
_execute.record_gradient(
|
| 579 |
+
"LeftShift", _inputs_flat, _attrs, _result)
|
| 580 |
+
_result, = _result
|
| 581 |
+
return _result
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
TV_PopulationCount_T = TypeVar("TV_PopulationCount_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 585 |
+
|
| 586 |
+
def population_count(x: Annotated[Any, TV_PopulationCount_T], name=None) -> Annotated[Any, _atypes.UInt8]:
|
| 587 |
+
r"""Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
|
| 588 |
+
|
| 589 |
+
For each entry in `x`, calculates the number of `1` (on) bits in the binary
|
| 590 |
+
representation of that entry.
|
| 591 |
+
|
| 592 |
+
**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
|
| 593 |
+
`int32` or `int64` and perform the bitcount on the result, than to feed in
|
| 594 |
+
8- or 16-bit inputs and then aggregate the resulting counts.
|
| 595 |
+
|
| 596 |
+
Args:
|
| 597 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 598 |
+
name: A name for the operation (optional).
|
| 599 |
+
|
| 600 |
+
Returns:
|
| 601 |
+
A `Tensor` of type `uint8`.
|
| 602 |
+
"""
|
| 603 |
+
_ctx = _context._context or _context.context()
|
| 604 |
+
tld = _ctx._thread_local_data
|
| 605 |
+
if tld.is_eager:
|
| 606 |
+
try:
|
| 607 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 608 |
+
_ctx, "PopulationCount", name, x)
|
| 609 |
+
return _result
|
| 610 |
+
except _core._NotOkStatusException as e:
|
| 611 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 612 |
+
except _core._FallbackException:
|
| 613 |
+
pass
|
| 614 |
+
try:
|
| 615 |
+
return population_count_eager_fallback(
|
| 616 |
+
x, name=name, ctx=_ctx)
|
| 617 |
+
except _core._SymbolicException:
|
| 618 |
+
pass # Add nodes to the TensorFlow graph.
|
| 619 |
+
# Add nodes to the TensorFlow graph.
|
| 620 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 621 |
+
"PopulationCount", x=x, name=name)
|
| 622 |
+
_result = _outputs[:]
|
| 623 |
+
if _execute.must_record_gradient():
|
| 624 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 625 |
+
_inputs_flat = _op.inputs
|
| 626 |
+
_execute.record_gradient(
|
| 627 |
+
"PopulationCount", _inputs_flat, _attrs, _result)
|
| 628 |
+
_result, = _result
|
| 629 |
+
return _result
|
| 630 |
+
|
| 631 |
+
PopulationCount = tf_export("raw_ops.PopulationCount")(_ops.to_raw_op(population_count))
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
def population_count_eager_fallback(x: Annotated[Any, TV_PopulationCount_T], name, ctx) -> Annotated[Any, _atypes.UInt8]:
|
| 635 |
+
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 636 |
+
_inputs_flat = [x]
|
| 637 |
+
_attrs = ("T", _attr_T)
|
| 638 |
+
_result = _execute.execute(b"PopulationCount", 1, inputs=_inputs_flat,
|
| 639 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 640 |
+
if _execute.must_record_gradient():
|
| 641 |
+
_execute.record_gradient(
|
| 642 |
+
"PopulationCount", _inputs_flat, _attrs, _result)
|
| 643 |
+
_result, = _result
|
| 644 |
+
return _result
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
TV_RightShift_T = TypeVar("TV_RightShift_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
|
| 648 |
+
|
| 649 |
+
@_dispatch.add_fallback_dispatch_list
|
| 650 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 651 |
+
@tf_export('bitwise.right_shift')
|
| 652 |
+
def right_shift(x: Annotated[Any, TV_RightShift_T], y: Annotated[Any, TV_RightShift_T], name=None) -> Annotated[Any, TV_RightShift_T]:
|
| 653 |
+
r"""Elementwise computes the bitwise right-shift of `x` and `y`.
|
| 654 |
+
|
| 655 |
+
Performs a logical shift for unsigned integer types, and an arithmetic shift
|
| 656 |
+
for signed integer types.
|
| 657 |
+
|
| 658 |
+
If `y` is negative, or greater than or equal to than the width of `x` in bits
|
| 659 |
+
the result is implementation defined.
|
| 660 |
+
|
| 661 |
+
Example:
|
| 662 |
+
|
| 663 |
+
```python
|
| 664 |
+
import tensorflow as tf
|
| 665 |
+
from tensorflow.python.ops import bitwise_ops
|
| 666 |
+
import numpy as np
|
| 667 |
+
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
|
| 668 |
+
|
| 669 |
+
for dtype in dtype_list:
|
| 670 |
+
lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
|
| 671 |
+
rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
|
| 672 |
+
|
| 673 |
+
right_shift_result = bitwise_ops.right_shift(lhs, rhs)
|
| 674 |
+
|
| 675 |
+
print(right_shift_result)
|
| 676 |
+
|
| 677 |
+
# This will print:
|
| 678 |
+
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
|
| 679 |
+
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
|
| 680 |
+
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
|
| 681 |
+
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
|
| 682 |
+
|
| 683 |
+
lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
|
| 684 |
+
rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
|
| 685 |
+
bitwise_ops.right_shift(lhs, rhs)
|
| 686 |
+
# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)>
|
| 687 |
+
```
|
| 688 |
+
|
| 689 |
+
Args:
|
| 690 |
+
x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.
|
| 691 |
+
y: A `Tensor`. Must have the same type as `x`.
|
| 692 |
+
name: A name for the operation (optional).
|
| 693 |
+
|
| 694 |
+
Returns:
|
| 695 |
+
A `Tensor`. Has the same type as `x`.
|
| 696 |
+
"""
|
| 697 |
+
_ctx = _context._context or _context.context()
|
| 698 |
+
tld = _ctx._thread_local_data
|
| 699 |
+
if tld.is_eager:
|
| 700 |
+
try:
|
| 701 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 702 |
+
_ctx, "RightShift", name, x, y)
|
| 703 |
+
return _result
|
| 704 |
+
except _core._NotOkStatusException as e:
|
| 705 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 706 |
+
except _core._FallbackException:
|
| 707 |
+
pass
|
| 708 |
+
try:
|
| 709 |
+
_result = _dispatcher_for_right_shift(
|
| 710 |
+
(x, y, name,), None)
|
| 711 |
+
if _result is not NotImplemented:
|
| 712 |
+
return _result
|
| 713 |
+
return right_shift_eager_fallback(
|
| 714 |
+
x, y, name=name, ctx=_ctx)
|
| 715 |
+
except _core._SymbolicException:
|
| 716 |
+
pass # Add nodes to the TensorFlow graph.
|
| 717 |
+
except (TypeError, ValueError):
|
| 718 |
+
_result = _dispatch.dispatch(
|
| 719 |
+
right_shift, (), dict(x=x, y=y, name=name)
|
| 720 |
+
)
|
| 721 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 722 |
+
return _result
|
| 723 |
+
raise
|
| 724 |
+
else:
|
| 725 |
+
_result = _dispatcher_for_right_shift(
|
| 726 |
+
(x, y, name,), None)
|
| 727 |
+
if _result is not NotImplemented:
|
| 728 |
+
return _result
|
| 729 |
+
# Add nodes to the TensorFlow graph.
|
| 730 |
+
try:
|
| 731 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 732 |
+
"RightShift", x=x, y=y, name=name)
|
| 733 |
+
except (TypeError, ValueError):
|
| 734 |
+
_result = _dispatch.dispatch(
|
| 735 |
+
right_shift, (), dict(x=x, y=y, name=name)
|
| 736 |
+
)
|
| 737 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 738 |
+
return _result
|
| 739 |
+
raise
|
| 740 |
+
_result = _outputs[:]
|
| 741 |
+
if _execute.must_record_gradient():
|
| 742 |
+
_attrs = ("T", _op._get_attr_type("T"))
|
| 743 |
+
_inputs_flat = _op.inputs
|
| 744 |
+
_execute.record_gradient(
|
| 745 |
+
"RightShift", _inputs_flat, _attrs, _result)
|
| 746 |
+
_result, = _result
|
| 747 |
+
return _result
|
| 748 |
+
|
| 749 |
+
RightShift = tf_export("raw_ops.RightShift")(_ops.to_raw_op(right_shift))
|
| 750 |
+
_dispatcher_for_right_shift = right_shift._tf_type_based_dispatcher.Dispatch
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
def right_shift_eager_fallback(x: Annotated[Any, TV_RightShift_T], y: Annotated[Any, TV_RightShift_T], name, ctx) -> Annotated[Any, TV_RightShift_T]:
|
| 754 |
+
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ])
|
| 755 |
+
(x, y) = _inputs_T
|
| 756 |
+
_inputs_flat = [x, y]
|
| 757 |
+
_attrs = ("T", _attr_T)
|
| 758 |
+
_result = _execute.execute(b"RightShift", 1, inputs=_inputs_flat,
|
| 759 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 760 |
+
if _execute.must_record_gradient():
|
| 761 |
+
_execute.record_gradient(
|
| 762 |
+
"RightShift", _inputs_flat, _attrs, _result)
|
| 763 |
+
_result, = _result
|
| 764 |
+
return _result
|
| 765 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_collective_ops.py
ADDED
|
@@ -0,0 +1,1452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Python wrappers around TensorFlow ops.
|
| 2 |
+
|
| 3 |
+
This file is MACHINE GENERATED! Do not edit.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
|
| 8 |
+
from tensorflow.python import pywrap_tfe as pywrap_tfe
|
| 9 |
+
from tensorflow.python.eager import context as _context
|
| 10 |
+
from tensorflow.python.eager import core as _core
|
| 11 |
+
from tensorflow.python.eager import execute as _execute
|
| 12 |
+
from tensorflow.python.framework import dtypes as _dtypes
|
| 13 |
+
from tensorflow.security.fuzzing.py import annotation_types as _atypes
|
| 14 |
+
|
| 15 |
+
from tensorflow.python.framework import op_def_registry as _op_def_registry
|
| 16 |
+
from tensorflow.python.framework import ops as _ops
|
| 17 |
+
from tensorflow.python.framework import op_def_library as _op_def_library
|
| 18 |
+
from tensorflow.python.util.deprecation import deprecated_endpoints
|
| 19 |
+
from tensorflow.python.util import dispatch as _dispatch
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
from typing import TypeVar, List, Any
|
| 23 |
+
from typing_extensions import Annotated
|
| 24 |
+
|
| 25 |
+
TV_CollectiveAllToAllV2_T = TypeVar("TV_CollectiveAllToAllV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 26 |
+
|
| 27 |
+
def collective_all_to_all_v2(input: Annotated[Any, TV_CollectiveAllToAllV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, name=None) -> Annotated[Any, TV_CollectiveAllToAllV2_T]:
|
| 28 |
+
r"""Mutually exchanges multiple tensors of identical type and shape.
|
| 29 |
+
|
| 30 |
+
`is_stateless` means each op does not need control dependencies to other
|
| 31 |
+
collective ops. In this case, keys that are unique at runtime
|
| 32 |
+
(e.g. `instance_key`) should be used to distinguish collective groups.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
|
| 36 |
+
group_size: A `Tensor` of type `int32`.
|
| 37 |
+
group_key: A `Tensor` of type `int32`.
|
| 38 |
+
instance_key: A `Tensor` of type `int32`.
|
| 39 |
+
ordering_token: A list of `Tensor` objects with type `resource`.
|
| 40 |
+
communication_hint: An optional `string`. Defaults to `"auto"`.
|
| 41 |
+
timeout_seconds: An optional `float`. Defaults to `0`.
|
| 42 |
+
is_stateless: An optional `bool`. Defaults to `False`.
|
| 43 |
+
name: A name for the operation (optional).
|
| 44 |
+
|
| 45 |
+
Returns:
|
| 46 |
+
A `Tensor`. Has the same type as `input`.
|
| 47 |
+
"""
|
| 48 |
+
_ctx = _context._context or _context.context()
|
| 49 |
+
tld = _ctx._thread_local_data
|
| 50 |
+
if tld.is_eager:
|
| 51 |
+
try:
|
| 52 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 53 |
+
_ctx, "CollectiveAllToAllV2", name, input, group_size, group_key,
|
| 54 |
+
instance_key, ordering_token, "communication_hint",
|
| 55 |
+
communication_hint, "timeout_seconds", timeout_seconds,
|
| 56 |
+
"is_stateless", is_stateless)
|
| 57 |
+
return _result
|
| 58 |
+
except _core._NotOkStatusException as e:
|
| 59 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 60 |
+
except _core._FallbackException:
|
| 61 |
+
pass
|
| 62 |
+
try:
|
| 63 |
+
return collective_all_to_all_v2_eager_fallback(
|
| 64 |
+
input, group_size, group_key, instance_key, ordering_token,
|
| 65 |
+
communication_hint=communication_hint,
|
| 66 |
+
timeout_seconds=timeout_seconds, is_stateless=is_stateless,
|
| 67 |
+
name=name, ctx=_ctx)
|
| 68 |
+
except _core._SymbolicException:
|
| 69 |
+
pass # Add nodes to the TensorFlow graph.
|
| 70 |
+
# Add nodes to the TensorFlow graph.
|
| 71 |
+
if not isinstance(ordering_token, (list, tuple)):
|
| 72 |
+
raise TypeError(
|
| 73 |
+
"Expected list for 'ordering_token' argument to "
|
| 74 |
+
"'collective_all_to_all_v2' Op, not %r." % ordering_token)
|
| 75 |
+
_attr_Nordering_token = len(ordering_token)
|
| 76 |
+
if communication_hint is None:
|
| 77 |
+
communication_hint = "auto"
|
| 78 |
+
communication_hint = _execute.make_str(communication_hint, "communication_hint")
|
| 79 |
+
if timeout_seconds is None:
|
| 80 |
+
timeout_seconds = 0
|
| 81 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 82 |
+
if is_stateless is None:
|
| 83 |
+
is_stateless = False
|
| 84 |
+
is_stateless = _execute.make_bool(is_stateless, "is_stateless")
|
| 85 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 86 |
+
"CollectiveAllToAllV2", input=input, group_size=group_size,
|
| 87 |
+
group_key=group_key,
|
| 88 |
+
instance_key=instance_key,
|
| 89 |
+
ordering_token=ordering_token,
|
| 90 |
+
communication_hint=communication_hint,
|
| 91 |
+
timeout_seconds=timeout_seconds,
|
| 92 |
+
is_stateless=is_stateless, name=name)
|
| 93 |
+
_result = _outputs[:]
|
| 94 |
+
if _execute.must_record_gradient():
|
| 95 |
+
_attrs = ("T", _op._get_attr_type("T"), "communication_hint",
|
| 96 |
+
_op.get_attr("communication_hint"), "timeout_seconds",
|
| 97 |
+
_op.get_attr("timeout_seconds"), "is_stateless",
|
| 98 |
+
_op._get_attr_bool("is_stateless"), "Nordering_token",
|
| 99 |
+
_op._get_attr_int("Nordering_token"))
|
| 100 |
+
_inputs_flat = _op.inputs
|
| 101 |
+
_execute.record_gradient(
|
| 102 |
+
"CollectiveAllToAllV2", _inputs_flat, _attrs, _result)
|
| 103 |
+
_result, = _result
|
| 104 |
+
return _result
|
| 105 |
+
|
| 106 |
+
CollectiveAllToAllV2 = tf_export("raw_ops.CollectiveAllToAllV2")(_ops.to_raw_op(collective_all_to_all_v2))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def collective_all_to_all_v2_eager_fallback(input: Annotated[Any, TV_CollectiveAllToAllV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint: str, timeout_seconds: float, is_stateless: bool, name, ctx) -> Annotated[Any, TV_CollectiveAllToAllV2_T]:
|
| 110 |
+
if not isinstance(ordering_token, (list, tuple)):
|
| 111 |
+
raise TypeError(
|
| 112 |
+
"Expected list for 'ordering_token' argument to "
|
| 113 |
+
"'collective_all_to_all_v2' Op, not %r." % ordering_token)
|
| 114 |
+
_attr_Nordering_token = len(ordering_token)
|
| 115 |
+
if communication_hint is None:
|
| 116 |
+
communication_hint = "auto"
|
| 117 |
+
communication_hint = _execute.make_str(communication_hint, "communication_hint")
|
| 118 |
+
if timeout_seconds is None:
|
| 119 |
+
timeout_seconds = 0
|
| 120 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 121 |
+
if is_stateless is None:
|
| 122 |
+
is_stateless = False
|
| 123 |
+
is_stateless = _execute.make_bool(is_stateless, "is_stateless")
|
| 124 |
+
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
|
| 125 |
+
group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
|
| 126 |
+
group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
|
| 127 |
+
instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
|
| 128 |
+
ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource)
|
| 129 |
+
_inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token)
|
| 130 |
+
_attrs = ("T", _attr_T, "communication_hint", communication_hint,
|
| 131 |
+
"timeout_seconds", timeout_seconds, "is_stateless", is_stateless,
|
| 132 |
+
"Nordering_token", _attr_Nordering_token)
|
| 133 |
+
_result = _execute.execute(b"CollectiveAllToAllV2", 1, inputs=_inputs_flat,
|
| 134 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 135 |
+
if _execute.must_record_gradient():
|
| 136 |
+
_execute.record_gradient(
|
| 137 |
+
"CollectiveAllToAllV2", _inputs_flat, _attrs, _result)
|
| 138 |
+
_result, = _result
|
| 139 |
+
return _result
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
TV_CollectiveAllToAllV3_T = TypeVar("TV_CollectiveAllToAllV3_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 143 |
+
|
| 144 |
+
def collective_all_to_all_v3(input: Annotated[Any, TV_CollectiveAllToAllV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveAllToAllV3_T]:
|
| 145 |
+
r"""Mutually exchanges multiple tensors of identical type and shape.
|
| 146 |
+
|
| 147 |
+
Args:
|
| 148 |
+
input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
|
| 149 |
+
communicator: A `Tensor` of type `resource`.
|
| 150 |
+
group_assignment: A `Tensor` of type `int32`.
|
| 151 |
+
timeout_seconds: An optional `float`. Defaults to `0`.
|
| 152 |
+
name: A name for the operation (optional).
|
| 153 |
+
|
| 154 |
+
Returns:
|
| 155 |
+
A `Tensor`. Has the same type as `input`.
|
| 156 |
+
"""
|
| 157 |
+
_ctx = _context._context or _context.context()
|
| 158 |
+
tld = _ctx._thread_local_data
|
| 159 |
+
if tld.is_eager:
|
| 160 |
+
try:
|
| 161 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 162 |
+
_ctx, "CollectiveAllToAllV3", name, input, communicator,
|
| 163 |
+
group_assignment, "timeout_seconds", timeout_seconds)
|
| 164 |
+
return _result
|
| 165 |
+
except _core._NotOkStatusException as e:
|
| 166 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 167 |
+
except _core._FallbackException:
|
| 168 |
+
pass
|
| 169 |
+
try:
|
| 170 |
+
return collective_all_to_all_v3_eager_fallback(
|
| 171 |
+
input, communicator, group_assignment,
|
| 172 |
+
timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
|
| 173 |
+
except _core._SymbolicException:
|
| 174 |
+
pass # Add nodes to the TensorFlow graph.
|
| 175 |
+
# Add nodes to the TensorFlow graph.
|
| 176 |
+
if timeout_seconds is None:
|
| 177 |
+
timeout_seconds = 0
|
| 178 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 179 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 180 |
+
"CollectiveAllToAllV3", input=input, communicator=communicator,
|
| 181 |
+
group_assignment=group_assignment,
|
| 182 |
+
timeout_seconds=timeout_seconds, name=name)
|
| 183 |
+
_result = _outputs[:]
|
| 184 |
+
if _execute.must_record_gradient():
|
| 185 |
+
_attrs = ("T", _op._get_attr_type("T"), "timeout_seconds",
|
| 186 |
+
_op.get_attr("timeout_seconds"))
|
| 187 |
+
_inputs_flat = _op.inputs
|
| 188 |
+
_execute.record_gradient(
|
| 189 |
+
"CollectiveAllToAllV3", _inputs_flat, _attrs, _result)
|
| 190 |
+
_result, = _result
|
| 191 |
+
return _result
|
| 192 |
+
|
| 193 |
+
CollectiveAllToAllV3 = tf_export("raw_ops.CollectiveAllToAllV3")(_ops.to_raw_op(collective_all_to_all_v3))
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def collective_all_to_all_v3_eager_fallback(input: Annotated[Any, TV_CollectiveAllToAllV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveAllToAllV3_T]:
|
| 197 |
+
if timeout_seconds is None:
|
| 198 |
+
timeout_seconds = 0
|
| 199 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 200 |
+
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
|
| 201 |
+
communicator = _ops.convert_to_tensor(communicator, _dtypes.resource)
|
| 202 |
+
group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32)
|
| 203 |
+
_inputs_flat = [input, communicator, group_assignment]
|
| 204 |
+
_attrs = ("T", _attr_T, "timeout_seconds", timeout_seconds)
|
| 205 |
+
_result = _execute.execute(b"CollectiveAllToAllV3", 1, inputs=_inputs_flat,
|
| 206 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 207 |
+
if _execute.must_record_gradient():
|
| 208 |
+
_execute.record_gradient(
|
| 209 |
+
"CollectiveAllToAllV3", _inputs_flat, _attrs, _result)
|
| 210 |
+
_result, = _result
|
| 211 |
+
return _result
|
| 212 |
+
|
| 213 |
+
_CollectiveAssignGroupV2Output = collections.namedtuple(
|
| 214 |
+
"CollectiveAssignGroupV2",
|
| 215 |
+
["group_size", "group_key"])
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def collective_assign_group_v2(group_assignment: Annotated[Any, _atypes.Int32], device_index: Annotated[Any, _atypes.Int32], base_key: Annotated[Any, _atypes.Int32], name=None):
|
| 219 |
+
r"""Assign group keys based on group assignment.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
group_assignment: A `Tensor` of type `int32`.
|
| 223 |
+
device_index: A `Tensor` of type `int32`.
|
| 224 |
+
base_key: A `Tensor` of type `int32`.
|
| 225 |
+
name: A name for the operation (optional).
|
| 226 |
+
|
| 227 |
+
Returns:
|
| 228 |
+
A tuple of `Tensor` objects (group_size, group_key).
|
| 229 |
+
|
| 230 |
+
group_size: A `Tensor` of type `int32`.
|
| 231 |
+
group_key: A `Tensor` of type `int32`.
|
| 232 |
+
"""
|
| 233 |
+
_ctx = _context._context or _context.context()
|
| 234 |
+
tld = _ctx._thread_local_data
|
| 235 |
+
if tld.is_eager:
|
| 236 |
+
try:
|
| 237 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 238 |
+
_ctx, "CollectiveAssignGroupV2", name, group_assignment, device_index,
|
| 239 |
+
base_key)
|
| 240 |
+
_result = _CollectiveAssignGroupV2Output._make(_result)
|
| 241 |
+
return _result
|
| 242 |
+
except _core._NotOkStatusException as e:
|
| 243 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 244 |
+
except _core._FallbackException:
|
| 245 |
+
pass
|
| 246 |
+
try:
|
| 247 |
+
return collective_assign_group_v2_eager_fallback(
|
| 248 |
+
group_assignment, device_index, base_key, name=name, ctx=_ctx)
|
| 249 |
+
except _core._SymbolicException:
|
| 250 |
+
pass # Add nodes to the TensorFlow graph.
|
| 251 |
+
# Add nodes to the TensorFlow graph.
|
| 252 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 253 |
+
"CollectiveAssignGroupV2", group_assignment=group_assignment,
|
| 254 |
+
device_index=device_index,
|
| 255 |
+
base_key=base_key, name=name)
|
| 256 |
+
_result = _outputs[:]
|
| 257 |
+
if _execute.must_record_gradient():
|
| 258 |
+
_attrs = ()
|
| 259 |
+
_inputs_flat = _op.inputs
|
| 260 |
+
_execute.record_gradient(
|
| 261 |
+
"CollectiveAssignGroupV2", _inputs_flat, _attrs, _result)
|
| 262 |
+
_result = _CollectiveAssignGroupV2Output._make(_result)
|
| 263 |
+
return _result
|
| 264 |
+
|
| 265 |
+
CollectiveAssignGroupV2 = tf_export("raw_ops.CollectiveAssignGroupV2")(_ops.to_raw_op(collective_assign_group_v2))
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def collective_assign_group_v2_eager_fallback(group_assignment: Annotated[Any, _atypes.Int32], device_index: Annotated[Any, _atypes.Int32], base_key: Annotated[Any, _atypes.Int32], name, ctx):
  """Eager-mode fallback for `collective_assign_group_v2`.

  Used when the C fast path raises `_FallbackException`; converts the inputs
  to `int32` tensors and executes the op via `_execute.execute`.
  """
  group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32)
  device_index = _ops.convert_to_tensor(device_index, _dtypes.int32)
  base_key = _ops.convert_to_tensor(base_key, _dtypes.int32)
  _inputs_flat = [group_assignment, device_index, base_key]
  _attrs = None  # CollectiveAssignGroupV2 has no attrs.
  # The op produces 2 outputs: (group_size, group_key).
  _result = _execute.execute(b"CollectiveAssignGroupV2", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveAssignGroupV2", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the named tuple (group_size, group_key).
  _result = _CollectiveAssignGroupV2Output._make(_result)
  return _result
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
TV_CollectiveBcastRecv_T = TypeVar("TV_CollectiveBcastRecv_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 285 |
+
|
| 286 |
+
def collective_bcast_recv(T: TV_CollectiveBcastRecv_T, group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastRecv_T]:
  r"""Receives a tensor value broadcast from another device.

  Args:
    T: A `tf.DType` from: `tf.bool, tf.float32, tf.half, tf.float64, tf.int32, tf.int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path, which takes attr name/value
    # pairs positionally after the op name.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveBcastRecv", name, "T", T, "group_size", group_size,
        "group_key", group_key, "instance_key", instance_key, "shape", shape,
        "communication_hint", communication_hint, "timeout_seconds",
        timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_bcast_recv_eager_fallback(
          T=T, group_size=group_size, group_key=group_key,
          instance_key=instance_key, shape=shape,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize/validate each attr, then build the op node.
  T = _execute.make_type(T, "T")
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveBcastRecv", T=T, group_size=group_size,
                               group_key=group_key, instance_key=instance_key,
                               shape=shape,
                               communication_hint=communication_hint,
                               timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back off the created op so the tape records the
    # canonicalized values.
    _attrs = ("T", _op._get_attr_type("T"), "group_size",
              _op._get_attr_int("group_size"), "group_key",
              _op._get_attr_int("group_key"), "instance_key",
              _op._get_attr_int("instance_key"), "shape",
              _op.get_attr("shape"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveBcastRecv", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 356 |
+
|
| 357 |
+
CollectiveBcastRecv = tf_export("raw_ops.CollectiveBcastRecv")(_ops.to_raw_op(collective_bcast_recv))
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def collective_bcast_recv_eager_fallback(T: TV_CollectiveBcastRecv_T, group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastRecv_T]:
  """Eager-mode fallback for `collective_bcast_recv`.

  Canonicalizes every attr and executes the op through `_execute.execute`.
  """
  T = _execute.make_type(T, "T")
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _inputs_flat = []  # CollectiveBcastRecv takes no tensor inputs.
  _attrs = ("T", T, "group_size", group_size, "group_key", group_key,
  "instance_key", instance_key, "shape", shape, "communication_hint",
  communication_hint, "timeout_seconds", timeout_seconds)
  _result = _execute.execute(b"CollectiveBcastRecv", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveBcastRecv", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
TV_CollectiveBcastRecvV2_T = TypeVar("TV_CollectiveBcastRecvV2_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 386 |
+
TV_CollectiveBcastRecvV2_Tshape = TypeVar("TV_CollectiveBcastRecvV2_Tshape", _atypes.Int32, _atypes.Int64)
|
| 387 |
+
|
| 388 |
+
def collective_bcast_recv_v2(group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], shape: Annotated[Any, TV_CollectiveBcastRecvV2_Tshape], T: TV_CollectiveBcastRecvV2_T, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastRecvV2_T]:
  r"""Receives a tensor value broadcast from another device.

  V2 variant: group/instance keys and the shape are runtime tensors rather
  than compile-time attrs.

  Args:
    group_size: A `Tensor` of type `int32`.
    group_key: A `Tensor` of type `int32`.
    instance_key: A `Tensor` of type `int32`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    T: A `tf.DType` from: `tf.bool, tf.float32, tf.half, tf.float64, tf.int32, tf.int64`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path (tensor inputs positional,
    # then attr name/value pairs).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveBcastRecvV2", name, group_size, group_key,
        instance_key, shape, "T", T, "communication_hint", communication_hint,
        "timeout_seconds", timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_bcast_recv_v2_eager_fallback(
          group_size, group_key, instance_key, shape, T=T,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize attrs, then build the op node.
  T = _execute.make_type(T, "T")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveBcastRecvV2", group_size=group_size, group_key=group_key,
                                 instance_key=instance_key, shape=shape, T=T,
                                 communication_hint=communication_hint,
                                 timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tshape",
              _op._get_attr_type("Tshape"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveBcastRecvV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 448 |
+
|
| 449 |
+
CollectiveBcastRecvV2 = tf_export("raw_ops.CollectiveBcastRecvV2")(_ops.to_raw_op(collective_bcast_recv_v2))
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def collective_bcast_recv_v2_eager_fallback(group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], shape: Annotated[Any, TV_CollectiveBcastRecvV2_Tshape], T: TV_CollectiveBcastRecvV2_T, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastRecvV2_T]:
  """Eager-mode fallback for `collective_bcast_recv_v2`.

  Converts the tensor inputs, infers `Tshape` from `shape` (defaulting to
  int32), and executes via `_execute.execute`.
  """
  T = _execute.make_type(T, "T")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
  _inputs_flat = [group_size, group_key, instance_key, shape]
  _attrs = ("T", T, "Tshape", _attr_Tshape, "communication_hint",
  communication_hint, "timeout_seconds", timeout_seconds)
  _result = _execute.execute(b"CollectiveBcastRecvV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveBcastRecvV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
TV_CollectiveBcastSend_T = TypeVar("TV_CollectiveBcastSend_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 477 |
+
|
| 478 |
+
def collective_bcast_send(input: Annotated[Any, TV_CollectiveBcastSend_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastSend_T]:
  r"""Broadcasts a tensor value to one or more other devices.

  Args:
    input: A `Tensor`. Must be one of the following types: `bool`, `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path (input tensor positional,
    # then attr name/value pairs).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveBcastSend", name, input, "group_size", group_size,
        "group_key", group_key, "instance_key", instance_key, "shape", shape,
        "communication_hint", communication_hint, "timeout_seconds",
        timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_bcast_send_eager_fallback(
          input, group_size=group_size, group_key=group_key,
          instance_key=instance_key, shape=shape,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize/validate each attr, then build the op node.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveBcastSend", input=input, group_size=group_size,
                               group_key=group_key, instance_key=instance_key,
                               shape=shape,
                               communication_hint=communication_hint,
                               timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "group_size",
              _op._get_attr_int("group_size"), "group_key",
              _op._get_attr_int("group_key"), "instance_key",
              _op._get_attr_int("instance_key"), "shape",
              _op.get_attr("shape"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveBcastSend", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 547 |
+
|

# Expose the op under `tf.raw_ops.CollectiveBcastSend`.
CollectiveBcastSend = tf_export("raw_ops.CollectiveBcastSend")(_ops.to_raw_op(collective_bcast_send))

|
| 551 |
+
def collective_bcast_send_eager_fallback(input: Annotated[Any, TV_CollectiveBcastSend_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastSend_T]:
  """Eager-mode fallback for `collective_bcast_send`.

  Canonicalizes attrs, infers `T` from `input`, and executes via
  `_execute.execute`.
  """
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bool, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
  "instance_key", instance_key, "shape", shape, "communication_hint",
  communication_hint, "timeout_seconds", timeout_seconds)
  _result = _execute.execute(b"CollectiveBcastSend", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveBcastSend", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
TV_CollectiveBcastSendV2_T = TypeVar("TV_CollectiveBcastSendV2_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 577 |
+
|
| 578 |
+
def collective_bcast_send_v2(input: Annotated[Any, TV_CollectiveBcastSendV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastSendV2_T]:
  r"""Broadcasts a tensor value to one or more other devices.

  V2 variant: group/instance keys are runtime tensors rather than
  compile-time attrs.

  Args:
    input: A `Tensor`. Must be one of the following types: `bool`, `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: A `Tensor` of type `int32`.
    group_key: A `Tensor` of type `int32`.
    instance_key: A `Tensor` of type `int32`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path (tensor inputs positional,
    # then attr name/value pairs).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveBcastSendV2", name, input, group_size, group_key,
        instance_key, "communication_hint", communication_hint,
        "timeout_seconds", timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_bcast_send_v2_eager_fallback(
          input, group_size, group_key, instance_key,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize attrs, then build the op node.
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveBcastSendV2", input=input, group_size=group_size,
                                 group_key=group_key,
                                 instance_key=instance_key,
                                 communication_hint=communication_hint,
                                 timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveBcastSendV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 636 |
+
|

# Expose the op under `tf.raw_ops.CollectiveBcastSendV2`.
CollectiveBcastSendV2 = tf_export("raw_ops.CollectiveBcastSendV2")(_ops.to_raw_op(collective_bcast_send_v2))

|
| 640 |
+
def collective_bcast_send_v2_eager_fallback(input: Annotated[Any, TV_CollectiveBcastSendV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastSendV2_T]:
  """Eager-mode fallback for `collective_bcast_send_v2`.

  Infers `T` from `input`, converts the key tensors to int32, and executes
  via `_execute.execute`.
  """
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bool, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
  _inputs_flat = [input, group_size, group_key, instance_key]
  _attrs = ("T", _attr_T, "communication_hint", communication_hint,
  "timeout_seconds", timeout_seconds)
  _result = _execute.execute(b"CollectiveBcastSendV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveBcastSendV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
TV_CollectiveGather_T = TypeVar("TV_CollectiveGather_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 664 |
+
|
| 665 |
+
def collective_gather(input: Annotated[Any, TV_CollectiveGather_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveGather_T]:
  r"""Mutually accumulates multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path (input tensor positional,
    # then attr name/value pairs).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveGather", name, input, "group_size", group_size,
        "group_key", group_key, "instance_key", instance_key, "shape", shape,
        "communication_hint", communication_hint, "timeout_seconds",
        timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_gather_eager_fallback(
          input, group_size=group_size, group_key=group_key,
          instance_key=instance_key, shape=shape,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize/validate each attr, then build the op node.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveGather", input=input, group_size=group_size,
                            group_key=group_key, instance_key=instance_key,
                            shape=shape,
                            communication_hint=communication_hint,
                            timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "group_size",
              _op._get_attr_int("group_size"), "group_key",
              _op._get_attr_int("group_key"), "instance_key",
              _op._get_attr_int("instance_key"), "shape",
              _op.get_attr("shape"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveGather", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 734 |
+
|

# Expose the op under `tf.raw_ops.CollectiveGather`.
CollectiveGather = tf_export("raw_ops.CollectiveGather")(_ops.to_raw_op(collective_gather))

|
| 738 |
+
def collective_gather_eager_fallback(input: Annotated[Any, TV_CollectiveGather_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveGather_T]:
  """Eager-mode fallback for `collective_gather`.

  Canonicalizes attrs, infers `T` from `input`, and executes via
  `_execute.execute`.
  """
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
  "instance_key", instance_key, "shape", shape, "communication_hint",
  communication_hint, "timeout_seconds", timeout_seconds)
  _result = _execute.execute(b"CollectiveGather", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveGather", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
TV_CollectiveGatherV2_T = TypeVar("TV_CollectiveGatherV2_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 764 |
+
|
| 765 |
+
def collective_gather_v2(input: Annotated[Any, TV_CollectiveGatherV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, name=None) -> Annotated[Any, TV_CollectiveGatherV2_T]:
  r"""Mutually accumulates multiple tensors of identical type and shape.

  `is_stateless` means each op does not need control dependencies to other
  collective ops. In this case, keys that are unique at runtime
  (e.g. `instance_key`) should be used to distinguish collective groups.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: A `Tensor` of type `int32`.
    group_key: A `Tensor` of type `int32`.
    instance_key: A `Tensor` of type `int32`.
    ordering_token: A list of `Tensor` objects with type `resource`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    is_stateless: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path (tensor inputs positional,
    # then attr name/value pairs).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveGatherV2", name, input, group_size, group_key,
        instance_key, ordering_token, "communication_hint",
        communication_hint, "timeout_seconds", timeout_seconds,
        "is_stateless", is_stateless)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the call; retry via the Python fallback.
    try:
      return collective_gather_v2_eager_fallback(
          input, group_size, group_key, instance_key, ordering_token,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, is_stateless=is_stateless,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate the list input, canonicalize attrs, build the node.
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_gather_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveGatherV2", input=input, group_size=group_size,
                              group_key=group_key, instance_key=instance_key,
                              ordering_token=ordering_token,
                              communication_hint=communication_hint,
                              timeout_seconds=timeout_seconds,
                              is_stateless=is_stateless, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"), "is_stateless",
              _op._get_attr_bool("is_stateless"), "Nordering_token",
              _op._get_attr_int("Nordering_token"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveGatherV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single-output op: unwrap the list.
  return _result
|
| 842 |
+
|

# Expose the op under `tf.raw_ops.CollectiveGatherV2`.
CollectiveGatherV2 = tf_export("raw_ops.CollectiveGatherV2")(_ops.to_raw_op(collective_gather_v2))

|
| 846 |
+
def collective_gather_v2_eager_fallback(input: Annotated[Any, TV_CollectiveGatherV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint: str, timeout_seconds: float, is_stateless: bool, name, ctx) -> Annotated[Any, _atypes.Resource] if False else Annotated[Any, TV_CollectiveGatherV2_T]:
  """Eager-mode fallback for the CollectiveGatherV2 op.

  Executes the op directly via `_execute.execute` on `ctx` instead of the
  fast-path C bindings. Reconstructed from a diff-mangled dump; behavior is
  unchanged from the generated wrapper.
  """
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_gather_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  # Infer the T attr from the input tensor's dtype (allowed dtypes fixed by the op def).
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
  ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource)
  _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token)
  _attrs = ("T", _attr_T, "communication_hint", communication_hint,
  "timeout_seconds", timeout_seconds, "is_stateless", is_stateless,
  "Nordering_token", _attr_Nordering_token)
  _result = _execute.execute(b"CollectiveGatherV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveGatherV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
def collective_initialize_communicator(group_key: Annotated[Any, _atypes.Int32], rank: Annotated[Any, _atypes.Int32], group_size: Annotated[Any, _atypes.Int32], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, _atypes.Resource]:
  r"""Initializes a group for collective operations.

  Args:
    group_key: A `Tensor` of type `int32`.
    rank: A `Tensor` of type `int32`.
    group_size: A `Tensor` of type `int32`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute eagerly through the C bindings; fall back to the
    # Python eager path, then to graph construction, on known exceptions.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveInitializeCommunicator", name, group_key, rank,
        group_size, "communication_hint", communication_hint,
        "timeout_seconds", timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return collective_initialize_communicator_eager_fallback(
          group_key, rank, group_size, communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveInitializeCommunicator", group_key=group_key, rank=rank,
                                            group_size=group_size,
                                            communication_hint=communication_hint,
                                            timeout_seconds=timeout_seconds,
                                            name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("communication_hint", _op.get_attr("communication_hint"),
              "timeout_seconds", _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveInitializeCommunicator", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 934 |
+
|
| 935 |
+
# Expose the generated wrapper as tf.raw_ops.CollectiveInitializeCommunicator.
CollectiveInitializeCommunicator = tf_export("raw_ops.CollectiveInitializeCommunicator")(_ops.to_raw_op(collective_initialize_communicator))
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
def collective_initialize_communicator_eager_fallback(group_key: Annotated[Any, _atypes.Int32], rank: Annotated[Any, _atypes.Int32], group_size: Annotated[Any, _atypes.Int32], communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, _atypes.Resource]:
  """Eager-mode fallback for the CollectiveInitializeCommunicator op.

  Executes the op directly via `_execute.execute` on `ctx`. Reconstructed
  from a diff-mangled dump; behavior is unchanged from the generated wrapper.
  """
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  rank = _ops.convert_to_tensor(rank, _dtypes.int32)
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  _inputs_flat = [group_key, rank, group_size]
  _attrs = ("communication_hint", communication_hint, "timeout_seconds",
  timeout_seconds)
  _result = _execute.execute(b"CollectiveInitializeCommunicator", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveInitializeCommunicator", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 959 |
+
|
| 960 |
+
|
| 961 |
+
# Type variable constraining CollectiveReduce's T attr to its allowed dtypes.
TV_CollectiveReduce_T = TypeVar("TV_CollectiveReduce_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 962 |
+
|
| 963 |
+
def collective_reduce(input: Annotated[Any, TV_CollectiveReduce_T], group_size: int, group_key: int, instance_key: int, merge_op: str, final_op: str, subdiv_offsets, wait_for=[], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveReduce_T]:
  r"""Mutually reduces multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`.
    final_op: A `string` from: `"Id", "Div"`.
    subdiv_offsets: A list of `ints`.
    wait_for: An optional list of `ints`. Defaults to `[]`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # NOTE: the mutable default `wait_for=[]` is never mutated here; it is kept
  # for interface compatibility with the generated wrapper.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: eager execution via C bindings, with Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveReduce", name, input, "group_size", group_size,
        "group_key", group_key, "instance_key", instance_key, "merge_op",
        merge_op, "final_op", final_op, "subdiv_offsets", subdiv_offsets,
        "wait_for", wait_for, "communication_hint", communication_hint,
        "timeout_seconds", timeout_seconds)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return collective_reduce_eager_fallback(
          input, group_size=group_size, group_key=group_key,
          instance_key=instance_key, merge_op=merge_op, final_op=final_op,
          subdiv_offsets=subdiv_offsets, wait_for=wait_for,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if not isinstance(subdiv_offsets, (list, tuple)):
    raise TypeError(
        "Expected list for 'subdiv_offsets' argument to "
        "'collective_reduce' Op, not %r." % subdiv_offsets)
  subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets]
  if wait_for is None:
    wait_for = []
  if not isinstance(wait_for, (list, tuple)):
    raise TypeError(
        "Expected list for 'wait_for' argument to "
        "'collective_reduce' Op, not %r." % wait_for)
  wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for]
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveReduce", input=input, group_size=group_size,
                            group_key=group_key, instance_key=instance_key,
                            merge_op=merge_op, final_op=final_op,
                            subdiv_offsets=subdiv_offsets, wait_for=wait_for,
                            communication_hint=communication_hint,
                            timeout_seconds=timeout_seconds, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "group_size",
              _op._get_attr_int("group_size"), "group_key",
              _op._get_attr_int("group_key"), "instance_key",
              _op._get_attr_int("instance_key"), "merge_op",
              _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"),
              "subdiv_offsets", _op.get_attr("subdiv_offsets"), "wait_for",
              _op.get_attr("wait_for"), "communication_hint",
              _op.get_attr("communication_hint"), "timeout_seconds",
              _op.get_attr("timeout_seconds"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1053 |
+
|
| 1054 |
+
# Expose the generated wrapper as tf.raw_ops.CollectiveReduce.
CollectiveReduce = tf_export("raw_ops.CollectiveReduce")(_ops.to_raw_op(collective_reduce))
|
| 1055 |
+
|
| 1056 |
+
|
| 1057 |
+
def collective_reduce_eager_fallback(input: Annotated[Any, TV_CollectiveReduce_T], group_size: int, group_key: int, instance_key: int, merge_op: str, final_op: str, subdiv_offsets, wait_for, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveReduce_T]:
  """Eager-mode fallback for the CollectiveReduce op.

  Executes the op directly via `_execute.execute` on `ctx`. Reconstructed
  from a diff-mangled dump; behavior is unchanged from the generated wrapper.
  """
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if not isinstance(subdiv_offsets, (list, tuple)):
    raise TypeError(
        "Expected list for 'subdiv_offsets' argument to "
        "'collective_reduce' Op, not %r." % subdiv_offsets)
  subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets]
  if wait_for is None:
    wait_for = []
  if not isinstance(wait_for, (list, tuple)):
    raise TypeError(
        "Expected list for 'wait_for' argument to "
        "'collective_reduce' Op, not %r." % wait_for)
  wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for]
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key,
  "instance_key", instance_key, "merge_op", merge_op, "final_op", final_op,
  "subdiv_offsets", subdiv_offsets, "wait_for", wait_for,
  "communication_hint", communication_hint, "timeout_seconds",
  timeout_seconds)
  _result = _execute.execute(b"CollectiveReduce", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1095 |
+
|
| 1096 |
+
|
| 1097 |
+
# Type variable constraining CollectiveReduceScatterV2's T attr to its allowed dtypes.
TV_CollectiveReduceScatterV2_T = TypeVar("TV_CollectiveReduceScatterV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 1098 |
+
|
| 1099 |
+
def collective_reduce_scatter_v2(input: Annotated[Any, TV_CollectiveReduceScatterV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, max_subdivs_per_device:int=-1, name=None) -> Annotated[Any, TV_CollectiveReduceScatterV2_T]:
  r"""Mutually reduces multiple tensors of identical type and shape and scatters the result.

  `is_stateless` means each op does not need control dependencies to other
  collective ops. In this case, keys that are unique at runtime
  (e.g. `instance_key`) should be used to distinguish collective groups.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: A `Tensor` of type `int32`.
    group_key: A `Tensor` of type `int32`.
    instance_key: A `Tensor` of type `int32`.
    ordering_token: A list of `Tensor` objects with type `resource`.
    merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`.
    final_op: A `string` from: `"Id", "Div"`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    is_stateless: An optional `bool`. Defaults to `False`.
    max_subdivs_per_device: An optional `int`. Defaults to `-1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: eager execution via C bindings, with Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveReduceScatterV2", name, input, group_size, group_key,
        instance_key, ordering_token, "merge_op", merge_op, "final_op",
        final_op, "communication_hint", communication_hint, "timeout_seconds",
        timeout_seconds, "is_stateless", is_stateless,
        "max_subdivs_per_device", max_subdivs_per_device)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return collective_reduce_scatter_v2_eager_fallback(
          input, group_size, group_key, instance_key, ordering_token,
          merge_op=merge_op, final_op=final_op,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, is_stateless=is_stateless,
          max_subdivs_per_device=max_subdivs_per_device, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_reduce_scatter_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  if max_subdivs_per_device is None:
    max_subdivs_per_device = -1
  max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveReduceScatterV2", input=input, group_size=group_size,
                                     group_key=group_key,
                                     instance_key=instance_key,
                                     ordering_token=ordering_token,
                                     merge_op=merge_op, final_op=final_op,
                                     communication_hint=communication_hint,
                                     timeout_seconds=timeout_seconds,
                                     is_stateless=is_stateless,
                                     max_subdivs_per_device=max_subdivs_per_device,
                                     name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "merge_op",
              _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"),
              "communication_hint", _op.get_attr("communication_hint"),
              "timeout_seconds", _op.get_attr("timeout_seconds"),
              "is_stateless", _op._get_attr_bool("is_stateless"),
              "Nordering_token", _op._get_attr_int("Nordering_token"),
              "max_subdivs_per_device",
              _op._get_attr_int("max_subdivs_per_device"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveReduceScatterV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1193 |
+
|
| 1194 |
+
# Expose the generated wrapper as tf.raw_ops.CollectiveReduceScatterV2.
CollectiveReduceScatterV2 = tf_export("raw_ops.CollectiveReduceScatterV2")(_ops.to_raw_op(collective_reduce_scatter_v2))
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
def collective_reduce_scatter_v2_eager_fallback(input: Annotated[Any, TV_CollectiveReduceScatterV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint: str, timeout_seconds: float, is_stateless: bool, max_subdivs_per_device: int, name, ctx) -> Annotated[Any, TV_CollectiveReduceScatterV2_T]:
  """Eager-mode fallback for the CollectiveReduceScatterV2 op.

  Executes the op directly via `_execute.execute` on `ctx`. Reconstructed
  from a diff-mangled dump; behavior is unchanged from the generated wrapper.
  """
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_reduce_scatter_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  if max_subdivs_per_device is None:
    max_subdivs_per_device = -1
  max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
  ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource)
  _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token)
  _attrs = ("T", _attr_T, "merge_op", merge_op, "final_op", final_op,
  "communication_hint", communication_hint, "timeout_seconds",
  timeout_seconds, "is_stateless", is_stateless, "Nordering_token",
  _attr_Nordering_token, "max_subdivs_per_device", max_subdivs_per_device)
  _result = _execute.execute(b"CollectiveReduceScatterV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveReduceScatterV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1235 |
+
|
| 1236 |
+
|
| 1237 |
+
# Type variable constraining CollectiveReduceV2's T attr to its allowed dtypes.
TV_CollectiveReduceV2_T = TypeVar("TV_CollectiveReduceV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 1238 |
+
|
| 1239 |
+
def collective_reduce_v2(input: Annotated[Any, TV_CollectiveReduceV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, max_subdivs_per_device:int=-1, name=None) -> Annotated[Any, TV_CollectiveReduceV2_T]:
  r"""Mutually reduces multiple tensors of identical type and shape.

  `is_stateless` means each op does not need control dependencies to other
  collective ops. In this case, keys that are unique at runtime
  (e.g. `instance_key`) should be used to distinguish collective groups.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: A `Tensor` of type `int32`.
    group_key: A `Tensor` of type `int32`.
    instance_key: A `Tensor` of type `int32`.
    ordering_token: A list of `Tensor` objects with type `resource`.
    merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`.
    final_op: A `string` from: `"Id", "Div"`.
    communication_hint: An optional `string`. Defaults to `"auto"`.
    timeout_seconds: An optional `float`. Defaults to `0`.
    is_stateless: An optional `bool`. Defaults to `False`.
    max_subdivs_per_device: An optional `int`. Defaults to `-1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: eager execution via C bindings, with Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CollectiveReduceV2", name, input, group_size, group_key,
        instance_key, ordering_token, "merge_op", merge_op, "final_op",
        final_op, "communication_hint", communication_hint, "timeout_seconds",
        timeout_seconds, "is_stateless", is_stateless,
        "max_subdivs_per_device", max_subdivs_per_device)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return collective_reduce_v2_eager_fallback(
          input, group_size, group_key, instance_key, ordering_token,
          merge_op=merge_op, final_op=final_op,
          communication_hint=communication_hint,
          timeout_seconds=timeout_seconds, is_stateless=is_stateless,
          max_subdivs_per_device=max_subdivs_per_device, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_reduce_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  if max_subdivs_per_device is None:
    max_subdivs_per_device = -1
  max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CollectiveReduceV2", input=input, group_size=group_size,
                              group_key=group_key, instance_key=instance_key,
                              ordering_token=ordering_token,
                              merge_op=merge_op, final_op=final_op,
                              communication_hint=communication_hint,
                              timeout_seconds=timeout_seconds,
                              is_stateless=is_stateless,
                              max_subdivs_per_device=max_subdivs_per_device,
                              name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "merge_op",
              _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"),
              "communication_hint", _op.get_attr("communication_hint"),
              "timeout_seconds", _op.get_attr("timeout_seconds"),
              "is_stateless", _op._get_attr_bool("is_stateless"),
              "Nordering_token", _op._get_attr_int("Nordering_token"),
              "max_subdivs_per_device",
              _op._get_attr_int("max_subdivs_per_device"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CollectiveReduceV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1332 |
+
|
| 1333 |
+
# Expose the generated wrapper as tf.raw_ops.CollectiveReduceV2.
CollectiveReduceV2 = tf_export("raw_ops.CollectiveReduceV2")(_ops.to_raw_op(collective_reduce_v2))
|
| 1334 |
+
|
| 1335 |
+
|
| 1336 |
+
def collective_reduce_v2_eager_fallback(input: Annotated[Any, TV_CollectiveReduceV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint: str, timeout_seconds: float, is_stateless: bool, max_subdivs_per_device: int, name, ctx) -> Annotated[Any, TV_CollectiveReduceV2_T]:
  """Eager-mode fallback for the CollectiveReduceV2 op.

  Executes the op directly via `_execute.execute` on `ctx`. Reconstructed
  from a diff-mangled dump; behavior is unchanged from the generated wrapper.
  """
  if not isinstance(ordering_token, (list, tuple)):
    raise TypeError(
        "Expected list for 'ordering_token' argument to "
        "'collective_reduce_v2' Op, not %r." % ordering_token)
  _attr_Nordering_token = len(ordering_token)
  merge_op = _execute.make_str(merge_op, "merge_op")
  final_op = _execute.make_str(final_op, "final_op")
  if communication_hint is None:
    communication_hint = "auto"
  communication_hint = _execute.make_str(communication_hint, "communication_hint")
  if timeout_seconds is None:
    timeout_seconds = 0
  timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
  if is_stateless is None:
    is_stateless = False
  is_stateless = _execute.make_bool(is_stateless, "is_stateless")
  if max_subdivs_per_device is None:
    max_subdivs_per_device = -1
  max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
  group_size = _ops.convert_to_tensor(group_size, _dtypes.int32)
  group_key = _ops.convert_to_tensor(group_key, _dtypes.int32)
  instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32)
  ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource)
  _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token)
  _attrs = ("T", _attr_T, "merge_op", merge_op, "final_op", final_op,
  "communication_hint", communication_hint, "timeout_seconds",
  timeout_seconds, "is_stateless", is_stateless, "Nordering_token",
  _attr_Nordering_token, "max_subdivs_per_device", max_subdivs_per_device)
  _result = _execute.execute(b"CollectiveReduceV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CollectiveReduceV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
|
| 1373 |
+
|
| 1374 |
+
|
| 1375 |
+
# Type variable constraining CollectiveReduceV3's T attr to its allowed dtypes.
TV_CollectiveReduceV3_T = TypeVar("TV_CollectiveReduceV3_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64)
|
| 1376 |
+
|
| 1377 |
+
def collective_reduce_v3(input: Annotated[Any, TV_CollectiveReduceV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], reduction: str, timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveReduceV3_T]:
|
| 1378 |
+
r"""Mutually reduces multiple tensors of identical type and shape.
|
| 1379 |
+
|
| 1380 |
+
Args:
|
| 1381 |
+
input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`.
|
| 1382 |
+
communicator: A `Tensor` of type `resource`.
|
| 1383 |
+
group_assignment: A `Tensor` of type `int32`.
|
| 1384 |
+
reduction: A `string` from: `"Min", "Max", "Mul", "Add"`.
|
| 1385 |
+
timeout_seconds: An optional `float`. Defaults to `0`.
|
| 1386 |
+
name: A name for the operation (optional).
|
| 1387 |
+
|
| 1388 |
+
Returns:
|
| 1389 |
+
A `Tensor`. Has the same type as `input`.
|
| 1390 |
+
"""
|
| 1391 |
+
_ctx = _context._context or _context.context()
|
| 1392 |
+
tld = _ctx._thread_local_data
|
| 1393 |
+
if tld.is_eager:
|
| 1394 |
+
try:
|
| 1395 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 1396 |
+
_ctx, "CollectiveReduceV3", name, input, communicator,
|
| 1397 |
+
group_assignment, "reduction", reduction, "timeout_seconds",
|
| 1398 |
+
timeout_seconds)
|
| 1399 |
+
return _result
|
| 1400 |
+
except _core._NotOkStatusException as e:
|
| 1401 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 1402 |
+
except _core._FallbackException:
|
| 1403 |
+
pass
|
| 1404 |
+
try:
|
| 1405 |
+
return collective_reduce_v3_eager_fallback(
|
| 1406 |
+
input, communicator, group_assignment, reduction=reduction,
|
| 1407 |
+
timeout_seconds=timeout_seconds, name=name, ctx=_ctx)
|
| 1408 |
+
except _core._SymbolicException:
|
| 1409 |
+
pass # Add nodes to the TensorFlow graph.
|
| 1410 |
+
# Add nodes to the TensorFlow graph.
|
| 1411 |
+
reduction = _execute.make_str(reduction, "reduction")
|
| 1412 |
+
if timeout_seconds is None:
|
| 1413 |
+
timeout_seconds = 0
|
| 1414 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 1415 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 1416 |
+
"CollectiveReduceV3", input=input, communicator=communicator,
|
| 1417 |
+
group_assignment=group_assignment,
|
| 1418 |
+
reduction=reduction,
|
| 1419 |
+
timeout_seconds=timeout_seconds, name=name)
|
| 1420 |
+
_result = _outputs[:]
|
| 1421 |
+
if _execute.must_record_gradient():
|
| 1422 |
+
_attrs = ("T", _op._get_attr_type("T"), "reduction",
|
| 1423 |
+
_op.get_attr("reduction"), "timeout_seconds",
|
| 1424 |
+
_op.get_attr("timeout_seconds"))
|
| 1425 |
+
_inputs_flat = _op.inputs
|
| 1426 |
+
_execute.record_gradient(
|
| 1427 |
+
"CollectiveReduceV3", _inputs_flat, _attrs, _result)
|
| 1428 |
+
_result, = _result
|
| 1429 |
+
return _result
|
| 1430 |
+
|
| 1431 |
+
CollectiveReduceV3 = tf_export("raw_ops.CollectiveReduceV3")(_ops.to_raw_op(collective_reduce_v3))
|
| 1432 |
+
|
| 1433 |
+
|
| 1434 |
+
def collective_reduce_v3_eager_fallback(input: Annotated[Any, TV_CollectiveReduceV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], reduction: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveReduceV3_T]:
|
| 1435 |
+
reduction = _execute.make_str(reduction, "reduction")
|
| 1436 |
+
if timeout_seconds is None:
|
| 1437 |
+
timeout_seconds = 0
|
| 1438 |
+
timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds")
|
| 1439 |
+
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
|
| 1440 |
+
communicator = _ops.convert_to_tensor(communicator, _dtypes.resource)
|
| 1441 |
+
group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32)
|
| 1442 |
+
_inputs_flat = [input, communicator, group_assignment]
|
| 1443 |
+
_attrs = ("T", _attr_T, "reduction", reduction, "timeout_seconds",
|
| 1444 |
+
timeout_seconds)
|
| 1445 |
+
_result = _execute.execute(b"CollectiveReduceV3", 1, inputs=_inputs_flat,
|
| 1446 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 1447 |
+
if _execute.must_record_gradient():
|
| 1448 |
+
_execute.record_gradient(
|
| 1449 |
+
"CollectiveReduceV3", _inputs_flat, _attrs, _result)
|
| 1450 |
+
_result, = _result
|
| 1451 |
+
return _result
|
| 1452 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_composite_tensor_ops.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Python wrappers around TensorFlow ops.
|
| 2 |
+
|
| 3 |
+
This file is MACHINE GENERATED! Do not edit.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
|
| 8 |
+
from tensorflow.python import pywrap_tfe as pywrap_tfe
|
| 9 |
+
from tensorflow.python.eager import context as _context
|
| 10 |
+
from tensorflow.python.eager import core as _core
|
| 11 |
+
from tensorflow.python.eager import execute as _execute
|
| 12 |
+
from tensorflow.python.framework import dtypes as _dtypes
|
| 13 |
+
from tensorflow.security.fuzzing.py import annotation_types as _atypes
|
| 14 |
+
|
| 15 |
+
from tensorflow.python.framework import op_def_registry as _op_def_registry
|
| 16 |
+
from tensorflow.python.framework import ops as _ops
|
| 17 |
+
from tensorflow.python.framework import op_def_library as _op_def_library
|
| 18 |
+
from tensorflow.python.util.deprecation import deprecated_endpoints
|
| 19 |
+
from tensorflow.python.util import dispatch as _dispatch
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
from typing import TypeVar, List, Any
|
| 23 |
+
from typing_extensions import Annotated
|
| 24 |
+
|
| 25 |
+
def composite_tensor_variant_from_components(components, metadata: str, name=None) -> Annotated[Any, _atypes.Variant]:
|
| 26 |
+
r"""Encodes an `ExtensionType` value into a `variant` scalar Tensor.
|
| 27 |
+
|
| 28 |
+
Returns a scalar variant tensor containing a single `CompositeTensorVariant`
|
| 29 |
+
with the specified Tensor components and TypeSpec.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
components: A list of `Tensor` objects.
|
| 33 |
+
The component tensors for the extension type value.
|
| 34 |
+
metadata: A `string`.
|
| 35 |
+
String serialization for the TypeSpec. (Note: the encoding for the TypeSpec
|
| 36 |
+
may change in future versions of TensorFlow.)
|
| 37 |
+
name: A name for the operation (optional).
|
| 38 |
+
|
| 39 |
+
Returns:
|
| 40 |
+
A `Tensor` of type `variant`.
|
| 41 |
+
"""
|
| 42 |
+
_ctx = _context._context or _context.context()
|
| 43 |
+
tld = _ctx._thread_local_data
|
| 44 |
+
if tld.is_eager:
|
| 45 |
+
try:
|
| 46 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 47 |
+
_ctx, "CompositeTensorVariantFromComponents", name, components,
|
| 48 |
+
"metadata", metadata)
|
| 49 |
+
return _result
|
| 50 |
+
except _core._NotOkStatusException as e:
|
| 51 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 52 |
+
except _core._FallbackException:
|
| 53 |
+
pass
|
| 54 |
+
try:
|
| 55 |
+
return composite_tensor_variant_from_components_eager_fallback(
|
| 56 |
+
components, metadata=metadata, name=name, ctx=_ctx)
|
| 57 |
+
except _core._SymbolicException:
|
| 58 |
+
pass # Add nodes to the TensorFlow graph.
|
| 59 |
+
# Add nodes to the TensorFlow graph.
|
| 60 |
+
metadata = _execute.make_str(metadata, "metadata")
|
| 61 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 62 |
+
"CompositeTensorVariantFromComponents", components=components,
|
| 63 |
+
metadata=metadata, name=name)
|
| 64 |
+
_result = _outputs[:]
|
| 65 |
+
if _execute.must_record_gradient():
|
| 66 |
+
_attrs = ("metadata", _op.get_attr("metadata"), "Tcomponents",
|
| 67 |
+
_op.get_attr("Tcomponents"))
|
| 68 |
+
_inputs_flat = _op.inputs
|
| 69 |
+
_execute.record_gradient(
|
| 70 |
+
"CompositeTensorVariantFromComponents", _inputs_flat, _attrs, _result)
|
| 71 |
+
_result, = _result
|
| 72 |
+
return _result
|
| 73 |
+
|
| 74 |
+
CompositeTensorVariantFromComponents = tf_export("raw_ops.CompositeTensorVariantFromComponents")(_ops.to_raw_op(composite_tensor_variant_from_components))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def composite_tensor_variant_from_components_eager_fallback(components, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
|
| 78 |
+
metadata = _execute.make_str(metadata, "metadata")
|
| 79 |
+
_attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(components, ctx)
|
| 80 |
+
_inputs_flat = list(components)
|
| 81 |
+
_attrs = ("metadata", metadata, "Tcomponents", _attr_Tcomponents)
|
| 82 |
+
_result = _execute.execute(b"CompositeTensorVariantFromComponents", 1,
|
| 83 |
+
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
|
| 84 |
+
name=name)
|
| 85 |
+
if _execute.must_record_gradient():
|
| 86 |
+
_execute.record_gradient(
|
| 87 |
+
"CompositeTensorVariantFromComponents", _inputs_flat, _attrs, _result)
|
| 88 |
+
_result, = _result
|
| 89 |
+
return _result
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def composite_tensor_variant_to_components(encoded: Annotated[Any, _atypes.Variant], metadata: str, Tcomponents, name=None):
|
| 93 |
+
r"""Decodes a `variant` scalar Tensor into an `ExtensionType` value.
|
| 94 |
+
|
| 95 |
+
Returns the Tensor components encoded in a `CompositeTensorVariant`.
|
| 96 |
+
|
| 97 |
+
Raises an error if `type_spec_proto` doesn't match the TypeSpec
|
| 98 |
+
in `encoded`.
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
encoded: A `Tensor` of type `variant`.
|
| 102 |
+
A scalar `variant` Tensor containing an encoded ExtensionType value.
|
| 103 |
+
metadata: A `string`.
|
| 104 |
+
String serialization for the TypeSpec. Must be compatible with the
|
| 105 |
+
`TypeSpec` contained in `encoded`. (Note: the encoding for the TypeSpec
|
| 106 |
+
may change in future versions of TensorFlow.)
|
| 107 |
+
Tcomponents: A list of `tf.DTypes`. Expected dtypes for components.
|
| 108 |
+
name: A name for the operation (optional).
|
| 109 |
+
|
| 110 |
+
Returns:
|
| 111 |
+
A list of `Tensor` objects of type `Tcomponents`.
|
| 112 |
+
"""
|
| 113 |
+
_ctx = _context._context or _context.context()
|
| 114 |
+
tld = _ctx._thread_local_data
|
| 115 |
+
if tld.is_eager:
|
| 116 |
+
try:
|
| 117 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 118 |
+
_ctx, "CompositeTensorVariantToComponents", name, encoded, "metadata",
|
| 119 |
+
metadata, "Tcomponents", Tcomponents)
|
| 120 |
+
return _result
|
| 121 |
+
except _core._NotOkStatusException as e:
|
| 122 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 123 |
+
except _core._FallbackException:
|
| 124 |
+
pass
|
| 125 |
+
try:
|
| 126 |
+
return composite_tensor_variant_to_components_eager_fallback(
|
| 127 |
+
encoded, metadata=metadata, Tcomponents=Tcomponents, name=name,
|
| 128 |
+
ctx=_ctx)
|
| 129 |
+
except _core._SymbolicException:
|
| 130 |
+
pass # Add nodes to the TensorFlow graph.
|
| 131 |
+
# Add nodes to the TensorFlow graph.
|
| 132 |
+
metadata = _execute.make_str(metadata, "metadata")
|
| 133 |
+
if not isinstance(Tcomponents, (list, tuple)):
|
| 134 |
+
raise TypeError(
|
| 135 |
+
"Expected list for 'Tcomponents' argument to "
|
| 136 |
+
"'composite_tensor_variant_to_components' Op, not %r." % Tcomponents)
|
| 137 |
+
Tcomponents = [_execute.make_type(_t, "Tcomponents") for _t in Tcomponents]
|
| 138 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 139 |
+
"CompositeTensorVariantToComponents", encoded=encoded,
|
| 140 |
+
metadata=metadata,
|
| 141 |
+
Tcomponents=Tcomponents,
|
| 142 |
+
name=name)
|
| 143 |
+
_result = _outputs[:]
|
| 144 |
+
if _execute.must_record_gradient():
|
| 145 |
+
_attrs = ("metadata", _op.get_attr("metadata"), "Tcomponents",
|
| 146 |
+
_op.get_attr("Tcomponents"))
|
| 147 |
+
_inputs_flat = _op.inputs
|
| 148 |
+
_execute.record_gradient(
|
| 149 |
+
"CompositeTensorVariantToComponents", _inputs_flat, _attrs, _result)
|
| 150 |
+
return _result
|
| 151 |
+
|
| 152 |
+
CompositeTensorVariantToComponents = tf_export("raw_ops.CompositeTensorVariantToComponents")(_ops.to_raw_op(composite_tensor_variant_to_components))
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def composite_tensor_variant_to_components_eager_fallback(encoded: Annotated[Any, _atypes.Variant], metadata: str, Tcomponents, name, ctx):
|
| 156 |
+
metadata = _execute.make_str(metadata, "metadata")
|
| 157 |
+
if not isinstance(Tcomponents, (list, tuple)):
|
| 158 |
+
raise TypeError(
|
| 159 |
+
"Expected list for 'Tcomponents' argument to "
|
| 160 |
+
"'composite_tensor_variant_to_components' Op, not %r." % Tcomponents)
|
| 161 |
+
Tcomponents = [_execute.make_type(_t, "Tcomponents") for _t in Tcomponents]
|
| 162 |
+
encoded = _ops.convert_to_tensor(encoded, _dtypes.variant)
|
| 163 |
+
_inputs_flat = [encoded]
|
| 164 |
+
_attrs = ("metadata", metadata, "Tcomponents", Tcomponents)
|
| 165 |
+
_result = _execute.execute(b"CompositeTensorVariantToComponents",
|
| 166 |
+
len(Tcomponents), inputs=_inputs_flat,
|
| 167 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 168 |
+
if _execute.must_record_gradient():
|
| 169 |
+
_execute.record_gradient(
|
| 170 |
+
"CompositeTensorVariantToComponents", _inputs_flat, _attrs, _result)
|
| 171 |
+
return _result
|
| 172 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|