diff --git a/llava_next/share/terminfo/a/aaa-30-s-rv b/llava_next/share/terminfo/a/aaa-30-s-rv new file mode 100644 index 0000000000000000000000000000000000000000..d677272a516cc1336eaeb294a902145217e57275 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-30-s-rv differ diff --git a/llava_next/share/terminfo/a/aaa-rv-unk b/llava_next/share/terminfo/a/aaa-rv-unk new file mode 100644 index 0000000000000000000000000000000000000000..388a0cb4c8ed8c3fe9d9aabea678584de9f7c30a Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-rv-unk differ diff --git a/llava_next/share/terminfo/a/abm85h-old b/llava_next/share/terminfo/a/abm85h-old new file mode 100644 index 0000000000000000000000000000000000000000..0bcff37f88af06697d4fafcce69bc35a5826a9e8 Binary files /dev/null and b/llava_next/share/terminfo/a/abm85h-old differ diff --git a/llava_next/share/terminfo/a/adds200 b/llava_next/share/terminfo/a/adds200 new file mode 100644 index 0000000000000000000000000000000000000000..07a979a17eaf7266c59be87b8546752a56505739 Binary files /dev/null and b/llava_next/share/terminfo/a/adds200 differ diff --git a/llava_next/share/terminfo/a/addsvp60 b/llava_next/share/terminfo/a/addsvp60 new file mode 100644 index 0000000000000000000000000000000000000000..3c28adffab491960f3d4811c3467e035adbd887e Binary files /dev/null and b/llava_next/share/terminfo/a/addsvp60 differ diff --git a/llava_next/share/terminfo/a/adm+sgr b/llava_next/share/terminfo/a/adm+sgr new file mode 100644 index 0000000000000000000000000000000000000000..afbc99edc591a8ef453b49b0f6dda5b0ac491aa3 Binary files /dev/null and b/llava_next/share/terminfo/a/adm+sgr differ diff --git a/llava_next/share/terminfo/a/adm36 b/llava_next/share/terminfo/a/adm36 new file mode 100644 index 0000000000000000000000000000000000000000..8850ad937fb9ad8c05292b13ec45a8a21a5684cd Binary files /dev/null and b/llava_next/share/terminfo/a/adm36 differ diff --git a/llava_next/share/terminfo/a/aj830 b/llava_next/share/terminfo/a/aj830 new 
file mode 100644 index 0000000000000000000000000000000000000000..cccfdb431d04529d2ece2f9b8f482ff3f0fbea31 Binary files /dev/null and b/llava_next/share/terminfo/a/aj830 differ diff --git a/llava_next/share/terminfo/a/ampex-219 b/llava_next/share/terminfo/a/ampex-219 new file mode 100644 index 0000000000000000000000000000000000000000..39f23d67e5a7a5cc36945eba5a8a7288473ec90e Binary files /dev/null and b/llava_next/share/terminfo/a/ampex-219 differ diff --git a/llava_next/share/terminfo/a/annarbor4080 b/llava_next/share/terminfo/a/annarbor4080 new file mode 100644 index 0000000000000000000000000000000000000000..078a29061d455389dacddfb3d694feb5021dc640 Binary files /dev/null and b/llava_next/share/terminfo/a/annarbor4080 differ diff --git a/llava_next/share/terminfo/a/ansi+pp b/llava_next/share/terminfo/a/ansi+pp new file mode 100644 index 0000000000000000000000000000000000000000..7ab68b197e64e7805070f28bfcae489881a5609a Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+pp differ diff --git a/llava_next/share/terminfo/a/ansi+sgr b/llava_next/share/terminfo/a/ansi+sgr new file mode 100644 index 0000000000000000000000000000000000000000..2d93102417c5cc39fc9e2b2c7ef7e41ae293235e Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+sgr differ diff --git a/llava_next/share/terminfo/a/ansi+sgrul b/llava_next/share/terminfo/a/ansi+sgrul new file mode 100644 index 0000000000000000000000000000000000000000..d07663a5b3212a09760848bff83539bfc7819211 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+sgrul differ diff --git a/llava_next/share/terminfo/a/ansi-color-2-emx b/llava_next/share/terminfo/a/ansi-color-2-emx new file mode 100644 index 0000000000000000000000000000000000000000..4eab4da7191a5531bff9d34cbdd112ae0a0ef2a6 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi-color-2-emx differ diff --git a/llava_next/share/terminfo/a/apl b/llava_next/share/terminfo/a/apl new file mode 100644 index 
0000000000000000000000000000000000000000..679a5744156214ad39b0705bd622bc2f998884cd Binary files /dev/null and b/llava_next/share/terminfo/a/apl differ diff --git a/llava_next/share/terminfo/a/at-color b/llava_next/share/terminfo/a/at-color new file mode 100644 index 0000000000000000000000000000000000000000..608e45420287020211bd17a09032e575a6eef743 Binary files /dev/null and b/llava_next/share/terminfo/a/at-color differ diff --git a/llava_next/share/terminfo/a/atari b/llava_next/share/terminfo/a/atari new file mode 100644 index 0000000000000000000000000000000000000000..b48ef93d33406f0934cecb007347e51cb49172bc Binary files /dev/null and b/llava_next/share/terminfo/a/atari differ diff --git a/llava_next/share/terminfo/a/atari_st b/llava_next/share/terminfo/a/atari_st new file mode 100644 index 0000000000000000000000000000000000000000..b48ef93d33406f0934cecb007347e51cb49172bc Binary files /dev/null and b/llava_next/share/terminfo/a/atari_st differ diff --git a/llava_next/share/terminfo/a/atari_st-color b/llava_next/share/terminfo/a/atari_st-color new file mode 100644 index 0000000000000000000000000000000000000000..608e45420287020211bd17a09032e575a6eef743 Binary files /dev/null and b/llava_next/share/terminfo/a/atari_st-color differ diff --git a/llava_next/share/terminfo/a/att2350 b/llava_next/share/terminfo/a/att2350 new file mode 100644 index 0000000000000000000000000000000000000000..0bceae2ed6f8fd65564afe00ca33ee332d53e1cd Binary files /dev/null and b/llava_next/share/terminfo/a/att2350 differ diff --git a/llava_next/share/terminfo/a/att4410v1 b/llava_next/share/terminfo/a/att4410v1 new file mode 100644 index 0000000000000000000000000000000000000000..d7d7336bc66004b8cb23a52c0f0ee0922c1c0dc0 Binary files /dev/null and b/llava_next/share/terminfo/a/att4410v1 differ diff --git a/llava_next/share/terminfo/a/att4410v1-w b/llava_next/share/terminfo/a/att4410v1-w new file mode 100644 index 0000000000000000000000000000000000000000..96225b8ef915f6e68d3e5dae17cb826d57ba66dc 
Binary files /dev/null and b/llava_next/share/terminfo/a/att4410v1-w differ diff --git a/llava_next/share/terminfo/a/att4424 b/llava_next/share/terminfo/a/att4424 new file mode 100644 index 0000000000000000000000000000000000000000..41b83bfa6a80fab78a664c3d0efe1d57d9942fb8 Binary files /dev/null and b/llava_next/share/terminfo/a/att4424 differ diff --git a/llava_next/share/terminfo/a/att4425 b/llava_next/share/terminfo/a/att4425 new file mode 100644 index 0000000000000000000000000000000000000000..ff371581cbba289cfe439f7d76f9cd9857328a7b Binary files /dev/null and b/llava_next/share/terminfo/a/att4425 differ diff --git a/llava_next/share/terminfo/a/att4426 b/llava_next/share/terminfo/a/att4426 new file mode 100644 index 0000000000000000000000000000000000000000..510154af0b26106069c772ecc9e1660516820b20 Binary files /dev/null and b/llava_next/share/terminfo/a/att4426 differ diff --git a/llava_next/share/terminfo/a/att505-22 b/llava_next/share/terminfo/a/att505-22 new file mode 100644 index 0000000000000000000000000000000000000000..bbf0d9a1a0969f9627684f6a45de7e8880107c7e Binary files /dev/null and b/llava_next/share/terminfo/a/att505-22 differ diff --git a/llava_next/share/terminfo/a/att5420-w-rv b/llava_next/share/terminfo/a/att5420-w-rv new file mode 100644 index 0000000000000000000000000000000000000000..dde6ba9dbaa732ad429d60e78639b741e2f6b6c6 Binary files /dev/null and b/llava_next/share/terminfo/a/att5420-w-rv differ diff --git a/llava_next/share/terminfo/a/att5425-w b/llava_next/share/terminfo/a/att5425-w new file mode 100644 index 0000000000000000000000000000000000000000..e9ffd490042e48c5b7836f97d083ae854799ff7e Binary files /dev/null and b/llava_next/share/terminfo/a/att5425-w differ diff --git a/llava_next/share/terminfo/a/att615-103k-w b/llava_next/share/terminfo/a/att615-103k-w new file mode 100644 index 0000000000000000000000000000000000000000..2f7f6651dfcdd8059069a39bf00feb19c9d01cf8 Binary files /dev/null and b/llava_next/share/terminfo/a/att615-103k-w 
differ diff --git a/llava_next/share/terminfo/a/att700 b/llava_next/share/terminfo/a/att700 new file mode 100644 index 0000000000000000000000000000000000000000..a151504daed43eaf3eae0cb38fe3e0546bea6e2d Binary files /dev/null and b/llava_next/share/terminfo/a/att700 differ diff --git a/llava_next/share/terminfo/a/att730-24 b/llava_next/share/terminfo/a/att730-24 new file mode 100644 index 0000000000000000000000000000000000000000..cf1e9a2042de51f038be78dd9ad9ef486dab2122 Binary files /dev/null and b/llava_next/share/terminfo/a/att730-24 differ diff --git a/llava_next/share/terminfo/a/att7300 b/llava_next/share/terminfo/a/att7300 new file mode 100644 index 0000000000000000000000000000000000000000..b41843d6a40a7d583c1dab1a8ab7da28442fbab6 Binary files /dev/null and b/llava_next/share/terminfo/a/att7300 differ diff --git a/llava_next/share/terminfo/a/avt b/llava_next/share/terminfo/a/avt new file mode 100644 index 0000000000000000000000000000000000000000..d38aa508e932054b06a0142e0562c8f38ccc2a0d Binary files /dev/null and b/llava_next/share/terminfo/a/avt differ diff --git a/llava_next/share/terminfo/a/avt-w b/llava_next/share/terminfo/a/avt-w new file mode 100644 index 0000000000000000000000000000000000000000..ced6270b62cad39a573c737e7769ef529811c6d4 Binary files /dev/null and b/llava_next/share/terminfo/a/avt-w differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/batch_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/batch_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0361ea242946b32ecd38b8a61cad096dbadd813c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/batch_ops.py @@ -0,0 +1,123 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Operations for automatic batching and unbatching.""" +from tensorflow.python.eager import def_function +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import gen_batch_ops +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_batch_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("nondifferentiable_batch_function") +def batch_function(num_batch_threads, + max_batch_size, + batch_timeout_micros, + allowed_batch_sizes=None, + max_enqueued_batches=10, + autograph=True, + enable_large_batch_splitting=True): + """Batches the computation done by the decorated function. + + So, for example, in the following code + + ```python + @batch_function(1, 2, 3) + def layer(a): + return tf.matmul(a, a) + + b = layer(w) + ``` + + if more than one session.run call is simultaneously trying to compute `b` + the values of `w` will be gathered, non-deterministically concatenated + along the first axis, and only one thread will run the computation. See the + documentation of the `Batch` op for more details. + + Assumes that all arguments of the decorated function are Tensors which will + be batched along their first dimension. + + SparseTensor is not supported. The return value of the decorated function + must be a Tensor or a list/tuple of Tensors. 
+ + Args: + num_batch_threads: Number of scheduling threads for processing batches + of work. Determines the number of batches processed in parallel. + max_batch_size: Batch sizes will never be bigger than this. + batch_timeout_micros: Maximum number of microseconds to wait before + outputting an incomplete batch. + allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, + does nothing. Otherwise, supplies a list of batch sizes, causing the op + to pad batches up to one of those sizes. The entries must increase + monotonically, and the final entry must equal max_batch_size. + max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10. + autograph: Whether to use autograph to compile python and eager style code + for efficient graph-mode execution. + enable_large_batch_splitting: The value of this option doesn't affect + processing output given the same input; it affects implementation details + as stated below: 1. Improve batching efficiency by eliminating unnecessary + adding. 2.`max_batch_size` specifies the limit of input and + `allowed_batch_sizes` specifies the limit of a task to be processed. API + user can give an input of size 128 when 'max_execution_batch_size' + is 32 -> implementation can split input of 128 into 4 x 32, schedule + concurrent processing, and then return concatenated results corresponding + to 128. + + Returns: + The decorated function will return the unbatched computation output Tensors. 
+ """ + + def decorator(fn): # pylint: disable=missing-docstring + + def decorated(*args): # pylint: disable=missing-docstring + + @def_function.function(autograph=autograph) + def computation(*computation_args): + return fn(*computation_args) + + computation = computation.get_concrete_function(*[ + tensor.TensorSpec( + dtype=x.dtype, shape=x.shape, name="batch_" + str(i)) + for i, x in enumerate(args) + ]) + + with ops.name_scope("batch") as name: + for a in args: + if not isinstance(a, tensor.Tensor): + raise ValueError("All arguments to functions decorated with " + "`batch_function` are supposed to be Tensors; " + f"found {a!r}.") + outputs = gen_batch_ops.batch_function( + num_batch_threads=num_batch_threads, + max_batch_size=max_batch_size, + batch_timeout_micros=batch_timeout_micros, + allowed_batch_sizes=allowed_batch_sizes, + max_enqueued_batches=max_enqueued_batches, + shared_name=name, + enable_large_batch_splitting=enable_large_batch_splitting, + f=computation, + in_tensors=list(args), + captured_tensors=computation.captured_inputs, + Tout=[o.dtype for o in computation.outputs]) + return nest.pack_sequence_as( + computation.structured_outputs, outputs, expand_composites=True) + + return decorated + + return decorator diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/bitwise_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/bitwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b2e31946a1b044645c7f4e45c7be5301de0dcdd8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/bitwise_ops.py @@ -0,0 +1,33 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Operations for manipulating the binary representations of integers. + +API docstring: tensorflow.bitwise +""" + +from tensorflow.python.framework import ops +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_bitwise_ops import * +# pylint: enable=wildcard-import + +ops.NotDifferentiable("BitwiseAnd") +ops.NotDifferentiable("BitwiseOr") +ops.NotDifferentiable("BitwiseXor") +ops.NotDifferentiable("Invert") +ops.NotDifferentiable("PopulationCount") +ops.NotDifferentiable("LeftShift") +ops.NotDifferentiable("RightShift") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/composite_tensor_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/composite_tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..51a44613f6ddb13f181a026668d3b818a1fedbd0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/composite_tensor_ops.py @@ -0,0 +1,118 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Operations for ExtensionTypes (aka Composite Tensors).""" + +from tensorflow.core.protobuf import composite_tensor_variant_pb2 +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import gen_composite_tensor_ops +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.util import nest + + +def composite_tensor_to_variants(value, type_spec=None, name=None): + """Encodes `value` as a scalar variant tensor. + + Args: + value: The `ExtensionType` value to encode. + type_spec: Information about the value's type that should be included in the + encoding. + name: Optional name for the operation. + + Returns: + A Tensor with shape=`()` and dtype=`tf.variant`. + + Raises: + ValueError: If `type_spec` is not compatible with `value`. + """ + if not isinstance(value, composite_tensor.CompositeTensor): + raise TypeError("Expected `value` to be a CompositeTensor. 
" + f"Received {type(value)}.") + + if type_spec is None: + type_spec = value._type_spec # pylint: disable=protected-access + if not type_spec.is_compatible_with(value): + raise ValueError(f"`type_spec` {type_spec} is not compatible with `value` " + f"{value!r}.") + metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata() + metadata.type_spec_proto.CopyFrom( + nested_structure_coder.encode_structure(type_spec).type_spec_value) + + return gen_composite_tensor_ops.CompositeTensorVariantFromComponents( + components=nest.flatten(value, expand_composites=True), + metadata=metadata.SerializeToString(), + name=name) + + +def composite_tensor_from_variant(encoded, type_spec, name=None): + """Returns the `ExtensionType` value encoded by a variant scalar tensor. + + Args: + encoded: A Tensor returned by `composite_tensor_to_variants`. + type_spec: The `TypeSpec` of the original value. This is used to determine + the number and types of the component tensors that comprise the decoded + value. Must be compatible with the `TypeSpec` serilized in `encoded`. + name: Optional name for the operation. + + Returns: + An `ExtensionType` value that is compatible with `TypeSpec`. + + Raises: + TypeError: If `encoded` is not a Tensor with dtype=variant. + InvalidArgumentError: If `encoded` is not compatible with `type_spec`. 
+ """ + if not isinstance(encoded, tensor.Tensor): + raise TypeError(f"Expected `encoded` to be a Tensor, got {encoded!r}.") + if encoded.dtype != dtypes.variant: + raise TypeError("Expected `encoded` to have dtype=variant, got " + f"{encoded!r}.") + encoded.shape.assert_is_compatible_with(()) + + metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata() + metadata.type_spec_proto.CopyFrom( + nested_structure_coder.encode_structure(type_spec).type_spec_value) + + component_dtypes = [ + t.dtype for t in nest.flatten(type_spec, expand_composites=True) + ] + + components = gen_composite_tensor_ops.CompositeTensorVariantToComponents( + encoded=encoded, + metadata=metadata.SerializeToString(), + Tcomponents=component_dtypes, + name=name) + return nest.pack_sequence_as(type_spec, components, expand_composites=True) + + +@ops.RegisterGradient("CompositeTensorVariantFromComponents") +def _composite_tensor_to_variants_grad(op, grad): + return gen_composite_tensor_ops.CompositeTensorVariantToComponents( + encoded=grad, + metadata=op.get_attr("metadata"), + Tcomponents=op.get_attr("Tcomponents")) + + +@ops.RegisterGradient("CompositeTensorVariantToComponents") +def _composite_tensor_from_variant_grad(op, *grad): + assert len(grad) == len(op.outputs) + # `components` is `op.outputs`, but with any tensors for which we're + # taking the gradient replaced by the corresponding value from `grad`. 
+ components = [ + op.outputs[i] if grad[i] is None else grad[i] for i in range(len(grad)) + ] + return gen_composite_tensor_ops.CompositeTensorVariantFromComponents( + components=components, metadata=op.get_attr("metadata")) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_assert.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_assert.py new file mode 100644 index 0000000000000000000000000000000000000000..18928fa6f357544912c9c55845e622f415e43167 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_assert.py @@ -0,0 +1,130 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Assert functions for Control Flow Operations.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import gen_control_flow_ops +from tensorflow.python.ops import gen_logging_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.util import dispatch +from tensorflow.python.util import tf_should_use +from tensorflow.python.util.tf_export import tf_export + + +def _summarize_eager(tensor, summarize=None): + """Returns a summarized string representation of eager `tensor`. + + Args: + tensor: EagerTensor to summarize + summarize: Include these many first elements of `array` + """ + # Emulate the behavior of Tensor::SummarizeValue() + if summarize is None: + summarize = 3 + elif summarize < 0: + summarize = array_ops.size(tensor) + + # reshape((-1,)) is the fastest way to get a flat array view + if tensor._rank(): # pylint: disable=protected-access + flat = tensor.numpy().reshape((-1,)) + lst = [str(x) for x in flat[:summarize]] + if len(lst) < flat.size: + lst.append("...") + else: + # tensor.numpy() returns a scalar for zero dimensional arrays + if gen_math_ops.not_equal(summarize, 0): + lst = [str(tensor.numpy())] + else: + lst = [] + + return ", ".join(lst) + + +# Assert and Print are special symbols in python, so we must +# use an upper-case version of them. +@tf_export("debugging.Assert", "Assert") +@dispatch.add_dispatch_support +@tf_should_use.should_use_result +def Assert(condition, data, summarize=None, name=None): + """Asserts that the given condition is true. + + If `condition` evaluates to false, print the list of tensors in `data`. + `summarize` determines how many entries of the tensors to print. 
+ + Args: + condition: The condition to evaluate. + data: The tensors to print out when condition is false. + summarize: Print this many entries of each tensor. + name: A name for this operation (optional). + + Returns: + assert_op: An `Operation` that, when executed, raises a + `tf.errors.InvalidArgumentError` if `condition` is not true. + @compatibility(eager) + returns None + @end_compatibility + + Raises: + @compatibility(TF1) + When in TF V1 mode (that is, outside `tf.function`) Assert needs a control + dependency on the output to ensure the assertion executes: + + ```python + # Ensure maximum element of x is smaller or equal to 1 + assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x]) + with tf.control_dependencies([assert_op]): + ... code using x ... + ``` + + @end_compatibility + """ + if context.executing_eagerly(): + if not condition: + xs = ops.convert_n_to_tensor(data) + data_str = [_summarize_eager(x, summarize) for x in xs] + raise errors.InvalidArgumentError( + node_def=None, + op=None, + message="Expected '%s' to be true. Summarized data: %s" % + (condition, "\n".join(data_str))) + return + + with ops.name_scope(name, "Assert", [condition, data]) as name: + xs = ops.convert_n_to_tensor(data) + if all(x.dtype in {dtypes.string, dtypes.int32} for x in xs): + # As a simple heuristic, we assume that string and int32 are + # on host to avoid the need to use cond. If it is not case, + # we will pay the price copying the tensor to host memory. 
+ return gen_logging_ops._assert(condition, data, summarize, name="Assert") # pylint: disable=protected-access + else: + condition = ops.convert_to_tensor(condition, name="Condition") + + def true_assert(): + return gen_logging_ops._assert( # pylint: disable=protected-access + condition, data, summarize, name="Assert") + + guarded_assert = cond.cond( + condition, + gen_control_flow_ops.no_op, + true_assert, + name="AssertGuard") + if context.executing_eagerly(): + return + return guarded_assert.op diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..489ebe344bf4d42e3334d5182c1749296267d42e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_ops.py @@ -0,0 +1,2256 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Control Flow Operations. + +See the [autograph](https://www.tensorflow.org/guide/autograph) guide. 
+""" +# pylint: disable=g-bad-name +import abc + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.protobuf import control_flow_pb2 +from tensorflow.python.eager import context +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import type_spec +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_util as util +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import tensor_array_ops +# go/tf-wildcard-import +# pylint: disable=wildcard-import,undefined-variable +from tensorflow.python.ops.gen_control_flow_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.util import compat +from tensorflow.python.util import dispatch +from tensorflow.python.util import nest +from tensorflow.python.util import variable_utils +from tensorflow.python.util.tf_export import tf_export + + +# We override the 'tuple' for a control flow op, so we keep python's +# existing 'tuple' for later use in this module. +_basetuple = tuple + + +# pylint: disable=protected-access + + +def _Identity(tensor, name=None): + """Return a tensor with the same shape and contents as the input tensor. + + Args: + tensor: A Tensor. + name: A name for this operation (optional). + + Returns: + A Tensor with the same type and value as the input Tensor. 
+ """ + tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True) + # TODO(b/246438937): Remove this when we expand ResourceVariables into + # dt_resource tensors. + tensor = variable_utils.convert_variables_to_tensors(tensor) + if isinstance(tensor, tensor_lib.Tensor): + if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access + return gen_array_ops.ref_identity(tensor, name=name) + else: + return array_ops.identity(tensor, name=name) + elif isinstance(tensor, composite_tensor.CompositeTensor): + return nest.map_structure(_Identity, tensor, expand_composites=True) + else: + raise TypeError("'tensor' must be a Tensor or CompositeTensor. " + f"Received: {type(tensor)}.") + + +def _NextIteration(tensor, name=None): + tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True) + if isinstance(tensor, tensor_lib.Tensor): + if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access + return ref_next_iteration(tensor, name=name) + else: + return next_iteration(tensor, name=name) + elif isinstance(tensor, composite_tensor.CompositeTensor): + return nest.map_structure(_NextIteration, tensor, expand_composites=True) + else: + raise TypeError("'tensor' must be a Tensor or CompositeTensor. " + f"Received: {type(tensor)}.") + + +def _Enter(tensor, + frame_name, + is_constant=False, + parallel_iterations=10, + use_ref=True, + use_input_shape=True, + name=None): + """Creates or finds a child frame, and makes `tensor` available to it. + + The unique `frame_name` is used by the `Executor` to identify frames. If + `is_constant` is true, `tensor` is a constant in the child frame; otherwise + it may be changed in the child frame. At most `parallel_iterations` + iterations are run in parallel in the child frame. + + Args: + tensor: The tensor to be made available to the child frame. + frame_name: The name of the child frame. + is_constant: If true, the output is constant within the child frame. 
+ parallel_iterations: The number of iterations allowed to run in parallel. + use_ref: If true, use ref_enter if tensor is of ref type. + use_input_shape: If true, set the result's shape based on tensor's shape. + name: A name for this operation (optional). + + Returns: + The same tensor as `tensor`. + + Raises: + ValueError: If any tensor in `tensor` has a less specific shape + than its corresponding shape in `shape_invariant`. + """ + tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True) + if isinstance(tensor, tensor_lib.Tensor): + if tensor.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access + result = gen_control_flow_ops.ref_enter( + tensor, frame_name, is_constant, parallel_iterations, name=name) + else: + result = gen_control_flow_ops.enter( + tensor, frame_name, is_constant, parallel_iterations, name=name) + if use_input_shape: + result.set_shape(tensor.get_shape()) + return result + elif isinstance(tensor, composite_tensor.CompositeTensor): + + def enter_component(t): + return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref, + use_input_shape) + + return nest.map_structure(enter_component, tensor, expand_composites=True) + else: + raise TypeError("'tensor' must be a Tensor or CompositeTensor. " + f"Received: {type(tensor)}.") + + +def exit(tensor, name=None): # pylint: disable=redefined-builtin + """Exits the current frame to its parent frame. + + Exit makes its input `tensor` available to the parent frame. + + Args: + tensor: The tensor to be made available to the parent frame. + name: A name for this operation (optional). + + Returns: + The same tensor as `tensor`. 
+ """ + tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True) + if isinstance(tensor, tensor_lib.Tensor): + if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access + return gen_control_flow_ops.ref_exit(tensor, name) + else: + return gen_control_flow_ops._exit(tensor, name) + elif isinstance(tensor, composite_tensor.CompositeTensor): + return nest.map_structure(exit, tensor, expand_composites=True) + else: + raise TypeError("'tensor' must be a Tensor or CompositeTensor. " + f"Received: {type(tensor)}.") + + +def switch(data, pred, dtype=None, name=None): + """Forwards `data` to an output determined by `pred`. + + If `pred` is false, the `data` input is forwarded to the first output. + Otherwise, the data goes to the second output. + + This op handles `Tensor`s and `IndexedSlices`. + + Args: + data: The tensor to be forwarded to the appropriate output. + pred: A scalar that specifies which output port will receive data. + dtype: Optional element type for the returned tensor. If missing, the type + is inferred from the type of `value`. + name: A name for this operation (optional). + + Returns: + `(output_false, output_true)`: If `pred` is true, data will be forwarded + to `output_true`, otherwise it goes to `output_false`. + """ + with ops.name_scope(name, "Switch", [data, pred]) as name: + data = ops.internal_convert_to_tensor_or_composite( + data, dtype=dtype, name="data", as_ref=True) + pred = ops.convert_to_tensor(pred, name="pred") + if isinstance(data, tensor_lib.Tensor): + return gen_control_flow_ops.switch(data, pred, name=name) + else: + if not isinstance(data, composite_tensor.CompositeTensor): + raise TypeError( + "'data' must be a Tensor or CompositeTensor. 
" + f"Received: {type(data)}.") + tensors = nest.flatten(data, expand_composites=True) + mapped = [gen_control_flow_ops.switch(tensor, pred) for tensor in tensors] + mapped_f, mapped_t = zip(*mapped) + return (nest.pack_sequence_as(data, mapped_f, expand_composites=True), + nest.pack_sequence_as(data, mapped_t, expand_composites=True)) + + +def _SwitchRefOrTensor(data, pred, name="Switch"): + """Forwards `data` to an output determined by `pred`. + + If `pred` is false, the `data` input is forwarded to the first output. + Otherwise, the data goes to the second output. + + This op handles `Tensor`s and `IndexedSlices`. + + Args: + data: The tensor to be forwarded to the appropriate output. + pred: A scalar that specifies which output port will receive data. + name: A name for this operation (optional). + + Returns: + `(output_false, output_true)`: If `pred` is true, data will be forwarded to + `output_true`, otherwise it goes to `output_false`. + + Raises: + TypeError: if data is not a Tensor or IndexedSlices + """ + data = ops.convert_to_tensor_or_composite(data, name="data") + # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below + # addresses the following scenario. + # + # Assume you execute Optimizer.apply_gradients() in a branch of a cond(). + # + # 1. The update op is created inside a `with ops.colocate(var):` block + # + # 2. Some tensor `data` is captured and a switch is created in a + # `with ops.colocate_with(data):` block. + # + # with ops.colocate_with(var): + # with ops.colocate_with(data): + # op = ... + # + # var and data may be pinned to different devices, so we want to ops + # created within ops.colocate_with(data) to ignore the existing stack. 
+ with ops.colocate_with(data, ignore_existing=True): + if isinstance(data, tensor_lib.Tensor): + if data.dtype._is_ref_dtype: # pylint: disable=protected-access + return ref_switch(data, pred, name=name) + return switch(data, pred, name=name) + + +def merge(inputs, name=None): + """Returns the value of an available element of `inputs`. + + This op tests each of the tensors in `inputs` in turn to determine if any of + them is available. If it finds an available tensor, it returns it and its + index in `inputs`. + + It is an error if more than one tensor in `inputs` is available. If no tensor + in `inputs` is available, the returned tensor and index are not set. + + This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of + `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices + before merging. + + Args: + inputs: The input tensors, at most one of which is available. + name: A name for this operation (optional). + + Returns: + A tuple containing the chosen input tensor and its index in `inputs`. + + Raises: + ValueError: If any of the inputs is None, or inputs are IndexedSlices and + some but not all have a dense_shape property. + """ + if any(inp is None for inp in inputs): + raise ValueError("At least one of the merge inputs is None: %s" % inputs) + with ops.name_scope(name, "Merge", inputs) as name: + inputs = [ + ops.internal_convert_to_tensor_or_composite(inp, as_ref=True) + for inp in inputs + ] + if all(isinstance(v, tensor_lib.Tensor) for v in inputs): + if all(v.dtype._is_ref_dtype for v in inputs): # pylint: disable=protected-access + return gen_control_flow_ops.ref_merge(inputs, name) + else: + return gen_control_flow_ops.merge(inputs, name) + else: + # If there is a mix of tensors and indexed slices, then convert the + # tensors to indexed slices. 
+ if all( + isinstance(v, (indexed_slices.IndexedSlices, tensor_lib.Tensor)) + for v in inputs): + inputs = math_ops._as_indexed_slices_list(inputs, optimize=False) + + for v in inputs: + if not isinstance(v, composite_tensor.CompositeTensor): + raise TypeError("Type %s not supported" % type(v)) + + for v in inputs[1:]: + nest.assert_same_structure(inputs[0], v, expand_composites=True) + + flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs] + merged_results = [ + gen_control_flow_ops.merge(component) + for component in zip(*flat_inputs) + ] + flat_merged = [tensor for (tensor, _) in merged_results] + chosen_index = merged_results[0][1] + merged_inputs = nest.pack_sequence_as( + inputs[0], flat_merged, expand_composites=True) + return (merged_inputs, chosen_index) + + +def _convert_tensorarray_to_flow(tensor_or_tensor_array): + if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray): + return tensor_or_tensor_array.flow + else: + return tensor_or_tensor_array + + +def _convert_flow_to_tensorarray(tensor_or_tensor_array, tensor_or_flow): + if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray): + return tensor_array_ops.build_ta_with_new_flow(tensor_or_tensor_array, + tensor_or_flow) + else: + return tensor_or_flow + + +def _convert_to_tensor_or_composite_or_tensorarray(var): + if isinstance(var, tensor_array_ops.TensorArray): + return var + return ops.convert_to_tensor_or_composite(var) + + +# TODO(xjun): replace this with is_subtype_of after it is landed. +def _ShapeLessThanOrEqual(shape1, shape2): + if shape2.dims is None: + return True + if shape1.ndims != shape2.ndims: + return False + for dim1, dim2 in zip(shape1.dims, shape2.dims): + if dim2.value is not None and dim1.value != dim2.value: + return False + return True + + +def _shape_invariant_to_type_spec(var, shape=None): + """Converts a shape invariant to a TypeSpec. + + If `var` is a TensorArray, it will first be converted to its flow. 
+ + Args: + var: The tensor, tensor array or composite tensor whose shape is described + by the shape invariant. + shape: A `TypeSpec` or `TensorShape`. If `shape` is already a `TypeSpec`, + then it is simply returned as-is. + + Returns: + A `TypeSpec` for `var`, consistent with the given shape. + + Raises: + TypeError: If `shape` is a TypeSpec and not compatible with `var`. + TypeError: If `shape` is not None, a TypeSpec, or a TensorShape. + TypeError: If `shape` is a TensorShape, `var` is a CompositeTensor, and + `var` doesn't implement the `_shape_invariant_to_type_spec` method. + """ + var = _convert_tensorarray_to_flow(var) + if shape is None: + return type_spec.type_spec_from_value(var) + elif isinstance(shape, type_spec.TypeSpec): + if not shape.is_compatible_with(var): + raise TypeError("TypeSpec %r is not compatible with %r" % (shape, var)) + return shape + elif not isinstance(shape, tensor_shape.TensorShape): + raise TypeError( + "'shape' must be one of TypeSpec, TensorShape or None. " + f"Received: {type(shape)}") + + if isinstance(var, tensor_lib.Tensor): + return tensor_lib.TensorSpec(shape, var.dtype) + else: + try: + return var._shape_invariant_to_type_spec(shape) # pylint: disable=protected-access + except NotImplementedError as e: + raise TypeError( + f"To describe or constrain a {type(var).__name__}, use a " + f"{type(var._type_spec).__name__} instead of a TensorShape.") from e # pylint: disable=protected-access + + +def _EnforceShapeInvariant(merge_var, next_var): + """Check if the shapes of the loops variables are invariants. + + Args: + merge_var: The tensor representing the initial values of the loop + variables. + next_var: The tensor representing the values of the loop variables + after one loop iteration. + + Raises: + ValueError: If any tensor in `merge_var` has a more specific shape than + its corresponding tensor in `next_var`. 
+ """ + if isinstance(merge_var, tensor_lib.Tensor): + m_shape = merge_var.get_shape() + n_shape = next_var.get_shape() + if not _ShapeLessThanOrEqual(n_shape, m_shape): + enter = merge_var.op.inputs[0].op + assert util.IsLoopEnter(enter) + input_t = enter.inputs[0] + raise ValueError( + "Input tensor '%s' enters the loop with shape %s, but has shape %s " + "after one iteration. To allow the shape to vary across iterations, " + "use the `shape_invariants` argument of tf.while_loop to specify a " + "less-specific shape." % (input_t.name, input_t.shape, n_shape)) + else: + raise TypeError("'merge_var' must be a Tensor. " + f"Received: {type(merge_var)}.") + + +def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True): + """Add NextIteration and back edge from v to m.""" + if isinstance(m, tensor_lib.Tensor): + v = ops.convert_to_tensor(v) + v = _NextIteration(v) + if enforce_shape_invariant: + # Make sure the shapes of loop outputs are correct. We do this before + # calling _update_input, which will raise a less-helpful error message if + # the types don't match. + # TODO(skyewm): call this for other cases below (needs testing) + _EnforceShapeInvariant(m, v) + m.op._update_input(1, v) # pylint: disable=protected-access + elif isinstance(m, composite_tensor.CompositeTensor): + # pylint: disable=protected-access + def update_component(m_component, v_component): + m_component.op._update_input(1, v_component) + + if isinstance(m, indexed_slices.IndexedSlices): + v = math_ops._as_indexed_slices(v, optimize=False) + # pylint: enable=protected-access + v = _NextIteration(v) + return nest.map_structure(update_component, m, v, expand_composites=True) + else: + raise TypeError("'m' must be a Tensor or CompositeTensor. " + f"Received: {type(m)}.") + return v + + +class ControlFlowContext(metaclass=abc.ABCMeta): + """The base class for control flow context. + + The usage pattern is a sequence of (Enter, Exit) followed by a final + ExitResult. 
+ + We maintain the following state for control flow contexts during graph + construction: + 1. graph has _control_flow_context: the current context used to + construct new nodes. Changed by ctxt.Enter() and ctxt.Exit() + 2. op has _control_flow_context: the context to which the op belongs. + Set at the time the op is created. Immutable. + 3. A ControlFlowContext has _outer_context: the context in which this + context is created. Set at the time a context is created. Immutable. + 4. A ControlFlowContext has _context_stack. + Pushed and popped by ctxt.Enter() and ctxt.Exit() + """ + + def __init__(self, values_def=None, import_scope=None): + self._nested_contexts = [] + self._outer_context = ops.get_default_graph()._get_control_flow_context() + if self._outer_context: + self._outer_context._nested_contexts.append(self) # pylint: disable=protected-access + self._context_stack = [] + if values_def: + self._init_values_from_proto(values_def, import_scope=import_scope) + else: + # The names of tensors that have been already seen in this context. + self._values = set() + # The keys are the names of tensors referenced by but external to this + # context. Each value is the Tensor that should be used by this context to + # access the key value (e.g. a switch output guarding a cond input value). + self._external_values = {} + + def _init_values_from_proto(self, values_def, import_scope=None): + """Initializes values and external_values from `ValuesDef` protocol buffer. + + Args: + values_def: `ValuesDef` protocol buffer. + import_scope: Optional `string`. Name scope to add. 
+ """ + assert isinstance(values_def, control_flow_pb2.ValuesDef) + self._values = set( + ops.prepend_name_scope(value, import_scope) + for value in values_def.values) + g = ops.get_default_graph() + self._external_values = {} + for k, v in values_def.external_values.items(): + k = ops.prepend_name_scope(k, import_scope) + self._external_values[k] = g.as_graph_element( + ops.prepend_name_scope(v, import_scope)) + op_names = set([ + op.split(":")[0] + for op in self._values - set(self._external_values.keys()) + ]) + for op in op_names: + # pylint: disable=protected-access + g.as_graph_element(op)._set_control_flow_context(self) + # pylint: enable=protected-access + + @property + def name(self): + return self._name + + @property + def outer_context(self): + """Return the context containing this context.""" + return self._outer_context + + @property + def grad_state(self): + raise NotImplementedError("Abstract method") + + @property + def back_prop(self): + raise NotImplementedError("Abstract method") + + @abc.abstractmethod + def to_control_flow_context_def(self, context_def, export_scope=None): + """Serializes this into `context_def`. + + Args: + context_def: a `ControlFlowContextDef` protocol buffer. + export_scope: Optional `string`. Name scope to remove. + """ + raise NotImplementedError("Abstract method") + + def _to_values_def(self, export_scope=None): + """Converts the values to a `ValuesDef` protocol buffer. + + Args: + export_scope: Optional `string`. Name scope to remove. + + Returns: + A `ValuesDef` protocol buffer. 
+ """ + values_def = control_flow_pb2.ValuesDef() + values_def.values.extend( + [ops.strip_name_scope(v, export_scope) for v in sorted(self._values)]) + for k, v in self._external_values.items(): + k = ops.strip_name_scope(k, export_scope) + values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope) + return values_def + + def AddName(self, name): + self._values.add(name) + + # pylint: disable=protected-access + def Enter(self): + """Enter this control flow context.""" + graph = ops.get_default_graph() + self._context_stack.append(graph._get_control_flow_context()) + graph._set_control_flow_context(self) + + def Exit(self): + """Exit this control flow context.""" + graph = ops.get_default_graph() + last_context = self._context_stack.pop() + graph._set_control_flow_context(last_context) + + def EnterGradientColocation(self, op: ops.Operation, gradient_uid): + """Start building a gradient colocated with an op.""" + if self._outer_context: + self._outer_context.EnterGradientColocation(op, gradient_uid) + + def ExitGradientColocation(self, op: ops.Operation, gradient_uid): + """Start building a gradient colocated with an op.""" + if self._outer_context: + self._outer_context.ExitGradientColocation(op, gradient_uid) + + def ExitResult(self, result): + """Make a list of tensors available in the outer context.""" + if self._outer_context: + def fn(x): + self._outer_context.AddName(x.name) + return x + nest.map_structure(fn, result, expand_composites=True) + + def GetWhileContext(self): + """Return the while context containing this context.""" + if self._outer_context: + return self._outer_context.GetWhileContext() + return None + + def _RemoveExternalControlEdges(self, op: ops.Operation): + """Remove any external control dependency on this op.""" + while_ctxt = self.GetWhileContext() + # A control input of `op` is internal if it is in the same while + # loop context as the enclosing while loop context of self. 
+ if while_ctxt is None: + internal_control_inputs, external_control_inputs = op.control_inputs, [] + else: + internal_control_inputs, external_control_inputs = [], [] + for x in op.control_inputs: + ctxt = util.GetOutputContext(x) + if ctxt is not None and ctxt.GetWhileContext() == while_ctxt: + internal_control_inputs.append(x) + else: + external_control_inputs.append(x) + if len(internal_control_inputs) != len(op.control_inputs): + # TODO(mdan): perhaps there should be a replace_control_inputs() + op._remove_all_control_inputs() + op._add_control_inputs(internal_control_inputs) + return internal_control_inputs, external_control_inputs + + # pylint: enable=protected-access + + def AddInnerOp(self, op: ops.Operation): + """Notifies a scope about an operator added to an inner scope.""" + if self._outer_context: + self._outer_context.AddInnerOp(op) + + def GetControlPivot(self): + """Returns the pivot node for this context, or None.""" + return None + + def IsWhileContext(self): + return False + + def IsCondContext(self): + return False + + def IsXLAContext(self): + return False + + def __str__(self): + return self.name + + +class CondContext(ControlFlowContext): + """The context for the conditional construct.""" + + def __init__(self, + pred=None, + pivot=None, + branch=None, + name="cond_text", + context_def=None, + import_scope=None): + """Creates a `CondContext`. + + Args: + pred: The `boolean` tensor for the conditional predicate. + pivot: The predicate tensor in this branch. + branch: 0 or 1 representing this branch. + name: Name of the `CondContext` python object. + context_def: Optional `ContextDef` protocol buffer to initialize the + `CondContext` object from. + import_scope: Optional `string`. Name scope to add. Only used when + initialing from protocol buffer. + """ + self._name = ops.get_default_graph().unique_name(name) + + if context_def: + self._init_from_proto(context_def, import_scope=import_scope) + else: + # Initializes the default fields. 
+ ControlFlowContext.__init__(self) + self._pred = pred # The boolean tensor for the cond predicate + self._pivot = pivot # The predicate tensor in this branch + self._branch = branch # 0 or 1 representing this branch + + # Values considered to have been already seen in this context. pred is not + # included in this context. + self._values.add(pred.name) + self._external_values[pred.name] = pred + self._values.add(pivot.name) + pivot.op._set_control_flow_context(self) # pylint: disable=protected-access + + def _init_from_proto(self, context_def, import_scope=None): + """Creates a new `CondContext` from protocol buffer. + + Args: + context_def: `CondContextDef` protocol buffer. + import_scope: Optional `string`. Name scope to add. + """ + assert isinstance(context_def, control_flow_pb2.CondContextDef) + # Create from context_def. + g = ops.get_default_graph() + self._name = ops.prepend_name_scope(context_def.context_name, import_scope) + self._pred = g.as_graph_element( + ops.prepend_name_scope(context_def.pred_name, import_scope)) + self._pivot = g.as_graph_element( + ops.prepend_name_scope(context_def.pivot_name, import_scope)) + self._branch = context_def.branch + super(CondContext, self).__init__( + values_def=context_def.values_def, import_scope=import_scope) + + @property + def pred(self): + return self._pred + + @property + def pivot(self): + return self._pivot + + @property + def branch(self): + return self._branch + + @property + def grad_state(self): + if self.GetWhileContext(): + return self.GetWhileContext().grad_state + return None + + @property + def back_prop(self): + if self.GetWhileContext(): + return self.GetWhileContext().back_prop + return False + + def GetControlPivot(self): + return self._pivot + + def to_proto(self, export_scope=None): + """Converts a `CondContext` to a `CondContextDef` protocol buffer. + + Args: + export_scope: Optional `string`. Name scope to remove. + + Returns: + A `CondContextDef` protocol buffer. 
+ """ + if (export_scope is None or self.name.startswith(export_scope)): + context_def = control_flow_pb2.CondContextDef() + context_def.context_name = ops.strip_name_scope(self.name, export_scope) + context_def.pred_name = ops.strip_name_scope(self._pred.name, + export_scope) + context_def.pivot_name = ops.strip_name_scope(self._pivot.name, + export_scope) + context_def.branch = self._branch + context_def.values_def.MergeFrom( + super(CondContext, self)._to_values_def(export_scope)) + for nested in self._nested_contexts: + nested_def = context_def.nested_contexts.add() + nested.to_control_flow_context_def(nested_def) + + return context_def + else: + return None + + @staticmethod + def from_proto(context_def, import_scope=None): + """Returns a `CondContext` object created from `context_def`.""" + ret = CondContext(context_def=context_def, import_scope=import_scope) + + ret.Enter() + for nested_def in context_def.nested_contexts: + from_control_flow_context_def(nested_def, import_scope=import_scope) + ret.Exit() + return ret + + def to_control_flow_context_def(self, context_def, export_scope=None): + context_def.cond_ctxt.CopyFrom(self.to_proto(export_scope=export_scope)) + + def AddValue(self, val): + """Add `val` to the current context and its outer context recursively.""" + if val.name in self._values: + # Use the real value if it comes from outer context. This is needed in + # particular for nested conds. 
+ result = self._external_values.get(val.name) + result = val if result is None else result + else: + result = val + self._values.add(val.name) + if self._outer_context: + result = self._outer_context.AddValue(val) + self._values.add(result.name) + self._external_values[result.name] = result + with ops.control_dependencies(None): + result = _SwitchRefOrTensor(result, self._pred)[self._branch] + if self._outer_context: + self._outer_context.AddInnerOp(result.op) + + result.op.graph.prevent_fetching(result.op) + # pylint: disable=protected-access + result.op._set_control_flow_context(self) + # pylint: enable=protected-access + + # Mark Switch output as seen by this context and any outer contexts, + # just like what we do for normal op outputs in _AddOpInternal() below. + ctxt = self + while ctxt is not None: + # pylint: disable=protected-access + ctxt._values.add(result.name) + ctxt = ctxt._outer_context + # pylint: enable=protected-access + + self._external_values[val.name] = result + return result + + def AddOp(self, op: ops.Operation): + self._AddOpInternal(op) + + def _AddOpInternal(self, op: ops.Operation): + """Add `op` to the current context.""" + if not op.inputs: + # If we're in a while loop, remove any control inputs from outside the + # loop. + self._RemoveExternalControlEdges(op) + + if not any( + util.OpInContext(input_op, self) for input_op in op.control_inputs): + # pylint: disable=protected-access + op._add_control_input(self._pivot.op) + # pylint: enable=protected-access + else: + # Make each input to 'op' available in this CondContext. If an input is + # already part of this context there's nothing to do, but if it's + # external, AddValue() will handle adding the appropriate Switch node and + # other bookkeeping. 
+ for index in range(len(op.inputs)): + x = op.inputs[index] + if op.type == "Merge" and x.op.type == "NextIteration": + # Edge case: if we're importing a while loop inside this CondContext, + # AddValue() will not correctly handle the NextIteration inputs to + # Merge node. The problem is that the NextIteration should also be + # part of this context, but if we're importing it won't have been + # processed and added to the context yet, so AddValue() will try to + # add a Switch which results in an invalid graph. Instead, we use the + # NextIteration input as-is here, and it will eventually be added to + # the context via AddOp(). + real_x = x + else: + real_x = self.AddValue(x) + if real_x != x: + # pylint: disable=protected-access + op._update_input(index, real_x) + # pylint: enable=protected-access + # Remove any external control dependency on this op. + self._RemoveExternalControlEdges(op) + # pylint: disable=protected-access + if op.graph._is_function(op.type) or op.type == "SymbolicGradient": + op._add_control_input(self._pivot.op) + # pylint: enable=protected-access + + # Mark op's outputs as seen by this context and any outer contexts. 
+ output_names = [x.name for x in op.outputs] + ctxt = self + while ctxt is not None: + # pylint: disable=protected-access + ctxt._values.update(output_names) + ctxt = ctxt._outer_context + # pylint: enable=protected-access + + if self._outer_context or not util.IsLoopExit(op): + op.graph.prevent_fetching(op) + + if self._outer_context: + self._outer_context.AddInnerOp(op) + + def _ProcessOutputTensor(self, val): + """Process an output tensor of a conditional branch.""" + real_val = val + if val.name not in self._values: + # Handle the special case of lambda: x + self._values.add(val.name) + if self._outer_context: + real_val = self._outer_context.AddValue(val) + self._values.add(real_val.name) + self._external_values[real_val.name] = real_val + real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch] + self._external_values[val.name] = real_val + else: + external_val = self._external_values.get(val.name) + if external_val is not None: + real_val = external_val + return real_val + + def _BuildCondTensor(self, v): + if isinstance(v, ops.Operation): + # Use pivot as the proxy for this op. 
+ return with_dependencies([v], self._pivot) + else: + v = nest.map_structure( + _convert_tensorarray_to_flow, v, expand_composites=True) + return self._ProcessOutputTensor(ops.convert_to_tensor(v)) + + def BuildCondBranch(self, fn): + """Add the subgraph defined by fn() to the graph.""" + pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + original_result = fn() + post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + if len(post_summaries) > len(pre_summaries): + new_summaries = post_summaries[len(pre_summaries):] + summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + summary_ref[:] = pre_summaries + with ops.control_dependencies(new_summaries): + if original_result is None: + return no_op(), None + elif not isinstance(original_result, ops.Operation): + original_result = variable_utils.convert_variables_to_tensors( + original_result) + original_result = nest.map_structure( + array_ops.identity, original_result, expand_composites=True) + if original_result is None: + return None, None + + original_result = variable_utils.convert_variables_to_tensors( + original_result) + result = nest.map_structure( + self._BuildCondTensor, original_result, expand_composites=True) + if not isinstance(result, (list, _basetuple)): + result = [result] + return original_result, result + + def IsCondContext(self): + return True + + +# pylint: enable=g-doc-args +# pylint: enable=redefined-outer-name + + +def _resource_safe_shape(t): + """Returns the shape of t or the variable it points to.""" + if t.dtype == dtypes.resource: + while t.op.inputs: + t = t.op.inputs[0] + return tensor_shape.TensorShape(t.op.get_attr("shape")) + return array_ops.shape_internal(t, optimize=False) + + +# TODO(yuanbyu): Consider having a unified notion of context for +# not only conditionals and loops but also control dependency and +# subgraphs. 
+class WhileContext(ControlFlowContext): + """The context for the loop construct.""" + + def __init__(self, + maximum_iterations=None, + parallel_iterations=10, + back_prop=True, + swap_memory=False, + name="while_context", + grad_state=None, + context_def=None, + import_scope=None): + """"Creates a `WhileContext`. + + Args: + maximum_iterations: Optional upper bound on number of loop iterations. + parallel_iterations: The number of iterations allowed to run in parallel. + back_prop: Whether backprop is enabled for this while loop. + swap_memory: Whether GPU-CPU memory swap is enabled for this loop. + name: Optional name prefix for the returned tensors. + grad_state: The gradient loop state. + context_def: Optional `WhileContextDef` protocol buffer to initialize the + `Whilecontext` python object from. + import_scope: Optional `string`. Name scope to add. Only used when + initialing from protocol buffer. + """ + if context_def: + self._init_from_proto(context_def, import_scope=import_scope) + else: + ControlFlowContext.__init__(self) + self._init_from_args(maximum_iterations, parallel_iterations, back_prop, + swap_memory, name) + # The gradient loop state. + self._grad_state = grad_state + + def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, + swap_memory, name): + """Creates a new `WhileContext` from arguments. + + Args: + maximum_iterations: Optional upper bound on number of loop iterations. + parallel_iterations: The number of iterations allowed to run in parallel. + back_prop: Whether backprop is enabled for this while loop. + swap_memory: Whether GPU-CPU memory swap is enabled for this loop. + name: Optional name prefix for the returned tensors. + + Raises: + ValueError: If `parallel_iterations` has invalid value. 
+ """ + if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0): + raise ValueError("'parallel_iterations' must be a positive integer: " + "%s" % parallel_iterations) + self._name = ops.get_default_graph().unique_name(name) + self._maximum_iterations = maximum_iterations + self._parallel_iterations = parallel_iterations + self._back_prop = back_prop + self._swap_memory = swap_memory + # We use this node to control constants created by the pred lambda. + self._pivot_for_pred = None + # We use this node to control constants created by the body lambda. + self._pivot_for_body = None + # The boolean tensor for loop termination condition. Used in code + # generation for gradient computation + self._pivot = None + # The list of exit tensors for loop variables. + self._loop_exits = [] + # The list of enter tensors for loop variables. + self._loop_enters = [] + self._graph = ops.get_default_graph() + + def _init_from_proto(self, context_def, import_scope=None): + """Creates a new `WhileContext` from protocol buffer. + + Args: + context_def: `WhileContextDef` protocol buffer. + import_scope: Optional `string`. Name scope to add. + """ + assert isinstance(context_def, control_flow_pb2.WhileContextDef) + # Create from context_def. + g = ops.get_default_graph() + self._name = ops.prepend_name_scope(context_def.context_name, import_scope) + if context_def.maximum_iterations_name: + self._maximum_iterations = g.as_graph_element( + ops.prepend_name_scope(context_def.maximum_iterations_name, + import_scope)) + else: + self._maximum_iterations = None + self._parallel_iterations = context_def.parallel_iterations + self._back_prop = context_def.back_prop + self._swap_memory = context_def.swap_memory + self._pivot_for_pred = g.as_graph_element( + ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope)) + # We use this node to control constants created by the body lambda. 
+ self._pivot_for_body = g.as_graph_element( + ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope)) + # The boolean tensor for loop termination condition. Used in code + # generation for gradient computation. + self._pivot = g.as_graph_element( + ops.prepend_name_scope(context_def.pivot_name, import_scope)) + # The list of exit tensors for loop variables. + self._loop_exits = [ + g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope)) + for exit_name in context_def.loop_exit_names + ] + # The list of enter tensors for loop variables. + self._loop_enters = [ + g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope)) + for enter_name in context_def.loop_enter_names + ] + super(WhileContext, self).__init__( + values_def=context_def.values_def, import_scope=import_scope) + + # import_scope causes self.name to be different from the original serialized + # context's name. Rewrite "frame_name" attrs with the new name. + if import_scope: + for tensor_name in self._values: + op = g.as_graph_element(tensor_name).op + if util.IsLoopEnter(op): + # pylint: disable=protected-access + op._set_attr("frame_name", + attr_value_pb2.AttrValue(s=compat.as_bytes(self.name))) + # pylint: enable=protected-access + self._graph = ops.get_default_graph() + + @property + def maximum_iterations(self): + """The maximum number of iterations that will be executed.""" + return self._maximum_iterations + + @property + def parallel_iterations(self): + """The number of iterations allowed to run in parallel.""" + return self._parallel_iterations + + @property + def back_prop(self): + """True iff backprop is enabled for this while loop.""" + return self._back_prop + + @property + def swap_memory(self): + """True iff GPU-CPU memory swap is enabled for this while loop.""" + return self._swap_memory + + @property + def pivot(self): + """The boolean tensor representing the loop termination condition.""" + return self._pivot + + @property + def loop_enters(self): + 
"""The list of enter tensors for loop variables.""" + return self._loop_enters + + @property + def loop_exits(self): + """The list of exit tensors for loop variables.""" + return self._loop_exits + + @property + def grad_state(self): + """The gradient loop state.""" + return self._grad_state + + def to_proto(self, export_scope=None): + """Converts a `WhileContext` to a `WhileContextDef` protocol buffer. + + Args: + export_scope: Optional `string`. Name scope to remove. + + Returns: + A `WhileContextDef` protocol buffer. + """ + if (export_scope is None or self.name.startswith(export_scope)): + context_def = control_flow_pb2.WhileContextDef() + context_def.context_name = ops.strip_name_scope(self.name, export_scope) + context_def.parallel_iterations = self._parallel_iterations + if self._maximum_iterations is not None: + context_def.maximum_iterations_name = ops.strip_name_scope( + self._maximum_iterations.name, export_scope) + context_def.back_prop = self._back_prop + context_def.swap_memory = self._swap_memory + context_def.pivot_for_pred_name = ops.strip_name_scope( + self._pivot_for_pred.name, export_scope) + context_def.pivot_for_body_name = ops.strip_name_scope( + self._pivot_for_body.name, export_scope) + context_def.pivot_name = ops.strip_name_scope(self._pivot.name, + export_scope) + context_def.loop_exit_names.extend([ + ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits + ]) + context_def.loop_enter_names.extend([ + ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters + ]) + context_def.values_def.MergeFrom( + super(WhileContext, self)._to_values_def(export_scope=export_scope)) + for nested in self._nested_contexts: + nested_def = context_def.nested_contexts.add() + nested.to_control_flow_context_def(nested_def) + + return context_def + else: + return None + + def to_control_flow_context_def(self, context_def, export_scope=None): + context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope)) + + 
@staticmethod + def from_proto(context_def, import_scope=None): + """Returns a `WhileContext` object created from `context_def`. + + Args: + context_def: A `WhileContextDef` protocol buffer. + import_scope: Optional `string`. Name scope to add. + + Returns: + A `WhileContext` Python object. + """ + ret = WhileContext(context_def=context_def, import_scope=import_scope) + ret.Enter() + for nested_def in context_def.nested_contexts: + from_control_flow_context_def(nested_def, import_scope=import_scope) + ret.Exit() + return ret + + def GetWhileContext(self): + return self + + def GetControlPivot(self): + if self._pivot_for_body is not None: + return self._pivot_for_body + return self._pivot_for_pred + + def AddValue(self, val): + """Add `val` to the current context and its outer context recursively.""" + result = val + new_value = val.name not in self._values + # Don't treat ops in this context as new values. Usually all known values + # are in self._values, except when we're importing a while loop inside this + # WhileContext. Since there's a cycle in this case, `val` may be part of the + # imported while loop but not yet processed by this context and added to + # self._values in _AddOpInternal. We only want to process external input + # tensors to the while loop here. + new_value &= val.op._control_flow_context is not self # pylint: disable=protected-access + if new_value: + self._values.add(val.name) + + # If we are in a grad context and val is from its forward context, + # use GetRealValue(), which adds the logic to save the history of + # val in forward. 
+ grad_ctxt = ops.get_default_graph()._get_control_flow_context() + if grad_ctxt: + grad_ctxt = grad_ctxt.GetWhileContext() + if grad_ctxt.grad_state: + forward_ctxt = util.GetWhileContext(val.op) + if util.IsLoopExit(val.op): + forward_ctxt = forward_ctxt.outer_context + if forward_ctxt: + forward_ctxt = forward_ctxt.GetWhileContext() + if forward_ctxt == grad_ctxt.grad_state.forward_context: + real_val = grad_ctxt.grad_state.GetRealValue(val) + self._external_values[val.name] = real_val + return real_val + + if self._outer_context is not None: + result = self._outer_context.AddValue(val) + # Create an Enter to make `result` known to this loop context. + with ops.control_dependencies(None): + enter = _Enter( + result, + self._name, + is_constant=True, + parallel_iterations=self._parallel_iterations) + enter.graph.prevent_feeding(enter) + if self._outer_context: + self._outer_context.AddInnerOp(enter.op) + # Fix the control inputs and control flow context of these enter ops. + self._FixControlInputsAndContext([enter]) + + # Add `enter` in this context. + self._values.add(enter.name) + self._external_values[val.name] = enter + result = enter + else: + actual_val = self._external_values.get(val.name) + if actual_val is not None: + result = actual_val + return result + + def AddOp(self, op: ops.Operation): + """Add `op` to the current context.""" + # For a reduction op, if op is in a grad context and its input is from + # its forward context, moving op to the forward context means we would + # store the tensor after the reduction as opposed to the tensor before + # reduction, and therefore could significantly reduce memory consumption. + # For now, we do this only for a few ops. + # + # If in XLA context, do not move constant ops to forward pass as pushing to + # and popping from a stack removes the constant property of an op and breaks + # XLA compilation, which requires certain inputs to be constant for certain + # ops. 
+ if not util.IsInXLAContext(op) and op.type in {"Shape", "Size", "Rank"}: + grad_ctxt = ops.get_default_graph()._get_control_flow_context() + if grad_ctxt: + grad_ctxt = grad_ctxt.GetWhileContext() + if grad_ctxt.grad_state: + op_input_forward_ctxt = util.GetWhileContext(op.inputs[0].op) + if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context: + op_input_ctxt = op.inputs[0].op._get_control_flow_context() + op._set_control_flow_context(op_input_ctxt) + op_input_ctxt._AddOpInternal(op) + return + self._AddOpInternal(op) + + # pylint: disable=g-doc-args + def _AddOpInternal(self, op: ops.Operation): + """Add `op` to the current context. + + We move any external control dependencies of the op to the loop pivot, to + ensure they get executed. + """ + # This is needed to prevent frame mismatch errors where there are Const + # nodes inside tf.function in v1 while_loop and inlining is turned on. + if op.type in ["PartitionedCall", "StatefulPartitionedCall"]: + op._add_control_input(self.GetControlPivot().op) # pylint: disable=protected-access + if not op.inputs: + # Remove any external control dependency on this op + control_inputs, external_inputs = self._RemoveExternalControlEdges(op) + # Add a control edge from the control pivot to this op. + if not control_inputs: + # pylint: disable=protected-access + op._add_control_input(self.GetControlPivot().op) + # pylint: enable=protected-access + for x in op.outputs: + self._values.add(x.name) + else: + for index in range(len(op.inputs)): + x = op.inputs[index] + real_x = self.AddValue(x) + if real_x != x: + op._update_input(index, real_x) # pylint: disable=protected-access + # Remove any external control dependency on this op. + _, external_inputs = self._RemoveExternalControlEdges(op) + # Add a control dependency to prevent loop invariants from + # enabling ops that should not be executed. 
+ self._MaybeAddControlDependency(op) + for x in op.outputs: + self._values.add(x.name) + if external_inputs: + # Use an identity to pull control inputs as data inputs. Note that we + # ignore ops which don't have outputs. TODO(apassos): fix that + with ops.control_dependencies(None): + self.Enter() + external_inputs = [ + array_ops.identity(x.outputs[0]).op + for x in external_inputs + if x.outputs + ] + self.Exit() + op._add_control_inputs(external_inputs) # pylint: disable=protected-access + if self._outer_context or not util.IsLoopExit(op): + op.graph.prevent_fetching(op) + for x in op.outputs: + op.graph.prevent_feeding(x) + + if self._outer_context: + self._outer_context.AddInnerOp(op) + + def _MaybeAddControlDependency(self, op: ops.Operation): + """Add a control input to the op if it only depends on loop invariants.""" + + def _IsOpFree(op): + """Determines if `op` needs a control dependency.""" + if op.control_inputs: + return False + # pylint: disable=protected-access + if op.graph._is_function(op.type) or op.type == "SymbolicGradient": + return True + # pylint: enable=protected-access + for x in op.inputs: + if not util.IsLoopConstantEnter(x.op): + return False + return True + + if _IsOpFree(op): + # pylint: disable=protected-access + op._add_control_input(self.GetControlPivot().op) + # pylint: enable=protected-access + + def AddForwardLoopCounter(self, outer_grad_state): + """Adds a loop that counts the number of iterations. + + This is added to the forward loop at the time when we start to + create the loop for backprop gradient computation. Called in + the outer context of this forward context. + + The pseudocode is: + `n = 0; while (_pivot) { n++; }` + + Note that a control dependency is added to `n` to ensure the correct + execution order of stack push ops. + + Args: + outer_grad_state: The outer grad state. None if not nested. + + Returns: + The number of iterations taken by the forward loop and the loop index. 
+ """ + n = constant_op.constant(0, name="f_count") + if outer_grad_state is not None: + # Force the stack pushes of i-th execution of an inner loop to be ordered + # before the pushes of (i+1)-th execution of the same inner loop. + outer_add_op = outer_grad_state.forward_index.op.inputs[0].op + n.op._add_control_input(outer_add_op) # pylint: disable=protected-access + + self.Enter() + self.AddName(n.name) + enter_n = _Enter( + n, + self._name, + is_constant=False, + parallel_iterations=self._parallel_iterations, + name="f_count") + self.loop_enters.append(enter_n) + + merge_n = merge([enter_n, enter_n])[0] + switch_n = switch(merge_n, self._pivot) + + index = math_ops.add(switch_n[1], 1) + next_n = _NextIteration(index) + merge_n.op._update_input(1, next_n) + + total_iterations = exit(switch_n[0], name="f_count") + self.loop_exits.append(total_iterations) + self.ExitResult([total_iterations]) + self.Exit() + return total_iterations, next_n + + def AddBackpropLoopCounter(self, count, outer_grad_state): + """Add the backprop loop that controls the iterations. + + This is added to the backprop loop. It is used to control the loop + termination of the backprop loop. Called in the outer context of + this grad context. + + The pseudocode is: + `n = count; while (n >= 1) { n--; }` + + Note that a control dependency is added to `final_zero` to ensure the + correct execution order of stack pop ops. + + Args: + count: The number of iterations for backprop. + outer_grad_state: The outer grad state. None if not nested. + + Returns: + The loop index. + """ + in_separate_functions = count.graph is not ops.get_default_graph() + if in_separate_functions: + # Brings the count into this graph + count = array_ops.identity(count) + else: + # TODO(apassos) XLA expects this constant to be created outside the loop, + # so doing that for now. 
+ one = constant_op.constant(1, name="b_count") + + self.Enter() + self.AddName(count.name) + enter_count = _Enter( + count, + self._name, + is_constant=False, + parallel_iterations=self._parallel_iterations, + name="b_count") + self.loop_enters.append(enter_count) + + merge_count = merge([enter_count, enter_count])[0] + self._pivot_for_pred = merge_count + + if in_separate_functions: + one = constant_op.constant(1, name="b_count") + pred = math_ops.greater_equal(merge_count, one) + self._pivot = loop_cond(pred, name="b_count") + switch_count = switch(merge_count, self._pivot) + + index = math_ops.subtract(switch_count[1], one) + self._pivot_for_body = index + next_count = _NextIteration(index) + merge_count.op._update_input(1, next_count) + + final_zero = exit(switch_count[0], name="b_count") + self.loop_exits.append(final_zero) + if outer_grad_state is not None: + # Force the stack pops of i-th execution of an inner loop to be ordered + # before the pops of (i+1)-th execution of the same inner loop. + # pylint: disable=protected-access + outer_grad_state.grad_sync._add_control_input(final_zero.op) + # pylint: enable=protected-access + + self.ExitResult([final_zero]) + self.Exit() + return next_count + + def AddBackpropAccumulator(self, op: ops.Operation, grad): + """Add an accumulation loop for every loop invariant. + + This is added to the backprop loop. It is used to accumulate partial + gradients within each loop iteration. Called when in the gradient while + context. + + The pseudocode is: + ``` + acc = 0.0; + while (_pivot) { + acc += grad; + } + ``` + + Args: + op: The Enter op for a loop invariant. + grad: The partial gradient of an iteration for a loop invariant. + + Returns: + The gradient for a loop invariant. + """ + self.Exit() + # Create a zeros tensor with the right shape for acc. If we don't + # know the full shape statically, we will have to get the shape + # dynamically from the forward inference. 
Getting the shape right + # for the zeros is only needed for the base case when the loop exits + # without running any iterations. + shape = grad.get_shape() + if shape.is_fully_defined(): + if self.outer_context: + self.outer_context.Enter() + acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc") + if self.outer_context: + self.outer_context.Exit() + else: + value = op.inputs[0] + if (isinstance(self.outer_context, WhileContext) and + self.outer_context.grad_state is not None): + # We are in a nested while loop. + forward_ctxt = self.grad_state.forward_context + forward_ctxt.outer_context.Enter() + zeros_shape = array_ops.shape_internal(value, optimize=False) + forward_ctxt.outer_context.Exit() + outer_grad_state = self.grad_state.outer_grad_state + history_zeros_shape = outer_grad_state.AddForwardAccumulator( + zeros_shape) + self.outer_context.Enter() + real_shape = outer_grad_state.AddBackpropAccumulatedValue( + history_zeros_shape, zeros_shape) + acc = array_ops.zeros(real_shape, grad.dtype) + self.outer_context.Exit() + else: + if self.outer_context: + self.outer_context.Enter() + zeros_shape = array_ops.shape_internal(value, optimize=False) + acc = array_ops.zeros(zeros_shape, grad.dtype) + if self.outer_context: + self.outer_context.Exit() + + self.Enter() + self.AddName(acc.name) + enter_acc = _Enter( + acc, + self._name, + is_constant=False, + parallel_iterations=self._parallel_iterations, + name="b_acc") + self.loop_enters.append(enter_acc) + + merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0] + switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot) + + add_acc = math_ops.add(switch_acc_true, grad) + next_acc = _NextIteration(add_acc) + merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access + + result_acc = exit(switch_acc_false, name="b_acc") + self.loop_exits.append(result_acc) + self.ExitResult([result_acc]) + return result_acc + + def AddBackpropIndexedSlicesAccumulator(self, op: ops.Operation, 
grad): + """This is used for accumulating gradients that are IndexedSlices. + + This is essentially the equivalent of AddBackpropAccumulator but optimized + for things like updating embeddings from within a while loop. + + Args: + op: The Enter op for a loop invariant. + grad: The partial gradients represented as an IndexedSlices. + + Returns: + The accumulated IndexedSlices gradient of the loop invariant. + """ + values = grad.values + indices = grad.indices + dense_shape = grad.dense_shape + + self.Exit() + if self.outer_context: + self.outer_context.Enter() + if values.get_shape().is_fully_defined(): + values_shape = tensor_shape.TensorShape([tensor_shape.Dimension(1)] + + values.get_shape().dims[1:]) + if self.outer_context: + self.outer_context.Enter() + values_acc = constant_op.constant( + 0, values.dtype, shape=values_shape, name="b_acc") + if self.outer_context: + self.outer_context.Exit() + else: + values_shape = _resource_safe_shape(op.inputs[0])[1:] + values_shape = array_ops.concat([[1], values_shape], 0) + values_acc = array_ops.zeros(values_shape, dtype=values.dtype) + indices_acc = constant_op.constant([0], indices.dtype) + shape_acc = None + if dense_shape is not None: + if dense_shape.get_shape().is_fully_defined(): + if self.outer_context: + self.outer_context.Enter() + shape_acc = constant_op.constant( + 0, dense_shape.dtype, shape=dense_shape.get_shape()) + if self.outer_context: + self.outer_context.Exit() + else: + shape_acc = array_ops.zeros_like( + array_ops.shape_internal( + op.inputs[0], optimize=False, out_type=dense_shape.dtype), + optimize=False) + + if self.outer_context: + self.outer_context.Exit() + + self.Enter() + self.AddName(values_acc.name) + self.AddName(indices_acc.name) + init_acc = [indices_acc, values_acc] + if shape_acc is not None: + self.AddName(shape_acc.name) + init_acc.append(shape_acc) + + # Set use_input_shape=False since the accumulator tensors will grow in + # size. 
If use_input_shape=True, the _update_input call below will result in + # incompatible shapes. + enter_acc = [ + _Enter( + x, + self._name, + is_constant=False, + parallel_iterations=self._parallel_iterations, + use_input_shape=False, + name="b_acc") for x in init_acc + ] + # Manually set appropriate partial shapes. + enter_acc[0].set_shape([None]) + if values_acc.shape.dims is not None: + enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:]) + self.loop_enters.extend(enter_acc) + + merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc] + switch_acc = [switch(x, self._pivot) for x in merge_acc] + + # The actual accumulation. + acc_indexed_slices = [ + array_ops.concat([xa[1], xv], 0) + for xa, xv in zip(switch_acc[:2], [indices, values]) + ] + if shape_acc is not None: + # For the shape we just keep the maximum + acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1])) + + next_acc = [_NextIteration(x) for x in acc_indexed_slices] + for xm, xn in zip(merge_acc, next_acc): + xm.op._update_input(1, xn) # pylint: disable=protected-access + + exit_acc = [exit(x[0], name="b_acc") for x in switch_acc] + self.loop_exits.extend(exit_acc) + + self.ExitResult(exit_acc) + return indexed_slices.IndexedSlices( + indices=exit_acc[0], + values=exit_acc[1], + dense_shape=exit_acc[2] if shape_acc is not None else None) + + def _InitializeValues(self, values): + """Makes the values known to this context.""" + self._values = set() + for x in values: + if isinstance(x, tensor_lib.Tensor): + self._values.add(x.name) + else: + raise TypeError("'values' must be a list of Tensors. 
" + f"Received: {type(x)}.") + + def _BuildLoop(self, pred, body, flat_orig_loop_vars, flat_loop_vars, + loop_vars_signature): + """Core: Add the loop termination condition and body to the graph.""" + flat_shape_invariants = nest.map_structure( + lambda spec: spec.shape, + nest.flatten(loop_vars_signature, expand_composites=True)) + + # Let the context know the loop variables so the loop variables + # would be added in the outer contexts properly. + self._InitializeValues(flat_loop_vars) + if self._outer_context: + real_vars = [self._outer_context.AddValue(x) for x in flat_loop_vars] + else: + real_vars = flat_loop_vars + + enter_vars = [] + with ops.control_dependencies(None): + for real_var, shape_invariant in zip(real_vars, flat_shape_invariants): + enter_var = _Enter( + real_var, + self._name, + is_constant=False, + parallel_iterations=self._parallel_iterations, + use_input_shape=False) + + if _ShapeLessThanOrEqual(real_var.get_shape(), shape_invariant): + enter_var.set_shape(shape_invariant) + else: + raise ValueError( + f"The shape invariant specified for {real_var.name} is not " + "compatible with the initial shape of the loop variable. It " + f"enters the loop with shape {real_var.get_shape()}, but the " + f"specified shape invariant is {shape_invariant}.") + + enter_var.graph.prevent_feeding(enter_var) + if self._outer_context: + self._outer_context.AddInnerOp(enter_var.op) + enter_vars.append(enter_var) + + # Finds the closest enclosing non-None control pivot. 
+ outer_context = self._outer_context + control_pivot = None + while outer_context is not None and control_pivot is None: + control_pivot = outer_context.GetControlPivot() + # pylint: disable=protected-access + outer_context = outer_context._outer_context + # pylint: enable=protected-access + + if control_pivot is not None: + for var in enter_vars: + if util.IsLoopConstantEnter(var.op.inputs[0].op): + # pylint: disable=protected-access + var.op._add_control_input(control_pivot.op) + # pylint: enable=protected-access + + # Fix the control inputs and control flow context of these enter ops. + self._FixControlInputsAndContext(enter_vars) + self._InitializeValues(enter_vars) + self._loop_enters = enter_vars + + merge_vars = [merge([x, x])[0] for x in enter_vars] + self._pivot_for_pred = merge_vars[0] + + merge_vars_with_tensorarrays = nest.map_structure( + _convert_flow_to_tensorarray, flat_orig_loop_vars, merge_vars) + # Build the graph for pred. + packed_vars = nest.pack_sequence_as( + structure=loop_vars_signature, + flat_sequence=merge_vars_with_tensorarrays, + expand_composites=True) + c = ops.convert_to_tensor(pred(*packed_vars)) + self._pivot = loop_cond(c, name="LoopCond") + switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars] + + # Build the graph for body. + vars_for_body = [_Identity(x[1]) for x in switch_vars] + self._pivot_for_body = vars_for_body[0] + # Convert TensorArray flow variables inside the context back into + # their associated TensorArrays for calling the body. 
+ vars_for_body_with_tensorarrays = nest.map_structure( + _convert_flow_to_tensorarray, flat_orig_loop_vars, vars_for_body) + packed_vars_for_body = nest.pack_sequence_as( + structure=loop_vars_signature, + flat_sequence=vars_for_body_with_tensorarrays, + expand_composites=True) + pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + body_result = body(*packed_vars_for_body) + post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + if not nest.is_nested(body_result): + body_result = [body_result] + if len(post_summaries) > len(pre_summaries): + new_summaries = post_summaries[len(pre_summaries):] + summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + summary_ref[:] = pre_summaries + with ops.control_dependencies(new_summaries): + + def map_fn(x): + # TODO(apassos) figure out how to trigger with tensor arrays as well + if isinstance(x, tensor_array_ops.TensorArray): + return x + return array_ops.identity(x) + + body_result = nest.map_structure( + map_fn, body_result, expand_composites=True) + + body_result = variable_utils.convert_variables_to_tensors(body_result) + # Compare the structure types of input and output of body. + # For backwards compatibility, the first layer is forced to a list + # during this comparison, because inputs are typically lists and + # outputs of the body are typically tuples. 
+ nest.assert_same_structure( + list(packed_vars_for_body), list(body_result), expand_composites=True) + + # Store body_result to keep track of TensorArrays returned by body + original_body_result = body_result + # Convert TensorArrays returned by body into their flow variables + result = nest.map_structure( + _convert_tensorarray_to_flow, + nest.flatten(body_result, expand_composites=True), + expand_composites=True) + result = ops.convert_n_to_tensor_or_composite(result) + + # Add NextIteration and the back edges to complete the loop. + if len(merge_vars) != len(result): + raise ValueError("Number of inputs and outputs of 'body' must match " + f"'loop_vars'. Got {len(merge_vars)} for the number of " + f"inputs/outputs, and {len(result)} for 'loop_vars'.") + next_vars = [] + for m, v in zip(merge_vars, result): + next_vars.append(_AddNextAndBackEdge(m, v)) + + # Add the exit ops. + exit_vars = [exit(x[0]) for x in switch_vars] + self._loop_exits = exit_vars + + # Exit the loop. + self.ExitResult(exit_vars) + + return original_body_result, exit_vars + + def BuildLoop(self, pred, body, loop_vars, shape_invariants, + return_same_structure): + """Add the loop termination condition and body to the graph.""" + + # Keep flat_orig_loop_vars to identify which are TensorArrays + flat_orig_loop_vars = nest.flatten(loop_vars, expand_composites=True) + + loop_vars = nest.map_structure( + _convert_to_tensor_or_composite_or_tensorarray, loop_vars) + # Convert TensorArrays to their flow variables + flat_loop_vars = nest.map_structure( + _convert_tensorarray_to_flow, + nest.flatten(loop_vars, expand_composites=True)) + + if shape_invariants is not None: + loop_vars_signature = nest.map_structure( + _shape_invariant_to_type_spec, loop_vars, shape_invariants) + else: + loop_vars_signature = nest.map_structure( + _shape_invariant_to_type_spec, loop_vars) + + try: + self.Enter() + # _BuildLoop calls _update_input in several places. 
_mutation_lock() + # ensures a Session.run call cannot occur between creating and mutating + # new ops. + with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access + original_body_result, exit_vars = self._BuildLoop( + pred, body, flat_orig_loop_vars, flat_loop_vars, + loop_vars_signature) + finally: + self.Exit() + + flat_result = nest.flatten(original_body_result, expand_composites=True) + # Convert TensorArray flow variables outside the context back into + # their associated TensorArrays for returning to caller. + exit_vars_with_tensorarrays = nest.map_structure( + _convert_flow_to_tensorarray, flat_result, exit_vars) + + packed_exit_vars = nest.pack_sequence_as( + structure=original_body_result, + flat_sequence=exit_vars_with_tensorarrays, + expand_composites=True) + + if return_same_structure: + return packed_exit_vars + else: + return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars + + def _FixControlInputsAndContext(self, enters): + graph = ops.get_default_graph() + # pylint: disable=protected-access + for e in enters: + if isinstance(e, tensor_lib.Tensor): + xs = [e] + else: + raise TypeError("'enters' must be a list of Tensors. " + f"Received: {type(e)}.") + for x in xs: + inp_op = x.op.inputs[0].op + control_inputs = graph._control_dependencies_for_inputs([inp_op]) + outer_control_inputs = [] + for op in control_inputs: + # We need to keep control inputs that are in any ancestor + # ControlFlowContext, and within outer WhileContext. 
+ keep_as_control_input = True + op_ctxt = util.GetOutputContext(op) + outer_ctxt = self.outer_context + outer_while_context = (None if outer_ctxt is None else + outer_ctxt.GetWhileContext()) + while outer_ctxt != op_ctxt: + if outer_ctxt is None or outer_ctxt == outer_while_context: + keep_as_control_input = False + break + outer_ctxt = outer_ctxt.outer_context + if keep_as_control_input: + outer_control_inputs.append(op) + x.op._set_control_flow_context(self) + x.op._add_control_inputs(outer_control_inputs) + graph._record_op_seen_by_control_dependencies(x.op) + # pylint: enable=protected-access + + def IsWhileContext(self): + return True + + +# pylint: enable=redefined-outer-name + + +def _AsTensorList(x, p): + """Return x as a list of Tensors or IndexedSlices. + + For entries of `x` that are Operations, this returns an Identity of `p` + with a dependency on the operation. + + Args: + x: A Tensor/IndexedSlices/Operation or a list or tuple of them. + p: A Tensor to return for entries in `x` that are Operations. + + Returns: + A list of Tensors or IndexedSlices. + """ + if not isinstance(x, (list, _basetuple)): + x = [x] + + l = [] + for v in x: + if isinstance(v, ops.Operation): + v = with_dependencies([v], p) + v = ops.convert_to_tensor_or_composite(v) + if isinstance(v, tensor_lib.Tensor): + l.append(array_ops.identity(v)) + else: + l.append( + indexed_slices.IndexedSlices( + array_ops.identity(v.values), array_ops.identity(v.indices))) + return l + + +def _CheckResults(a, b): + assert len(a) == len(b), ( + "Values returned by a() and b() must have the same length.") + for x, y in zip(a, b): + assert x.dtype == y.dtype, ( + "Values returned by a() [%s] and b() [%s] must have " + "the same type: %s, %s." % (x.name, y.name, x.dtype.name, y.dtype.name)) + + +def with_dependencies(dependencies, output_tensor, name=None): + """Produces the content of `output_tensor` only after `dependencies`. 
+ + In some cases, a user may want the output of an operation to be + consumed externally only after some other dependencies have run + first. This function ensures returns `output_tensor`, but only after all + operations in `dependencies` have run. Note that this means that there is + no guarantee that `output_tensor` will be evaluated after any `dependencies` + have run. + + See also `tf.tuple` and `tf.group`. + + Args: + dependencies: Iterable of operations to run before this op finishes. + output_tensor: A `Tensor` or `IndexedSlices` that will be returned. + name: (Optional) A name for this operation. + + Returns: + Same as `output_tensor`. + + Raises: + TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`. + """ + if context.executing_eagerly(): + return output_tensor + with ops.name_scope(name, "control_dependency", + list(dependencies) + [output_tensor]) as name: + with ops.colocate_with(output_tensor): + with ops.control_dependencies(dependencies): + output_tensor = ops.convert_to_tensor_or_composite(output_tensor) + if isinstance(output_tensor, indexed_slices.IndexedSlices): + return indexed_slices.IndexedSlices( + _Identity(output_tensor.values, name=name), output_tensor.indices, + output_tensor.dense_shape) + else: + return _Identity(output_tensor, name=name) + + +def _GroupControlDeps(dev, deps, name=None): + with ops.control_dependencies(deps): + if dev is None: + return no_op(name=name) + else: + with ops.device(dev): + return no_op(name=name) + + +# TODO(touts): Accept "inputs" as a list. +@tf_export("group") +def group(*inputs, **kwargs): + """Create an op that groups multiple operations. + + When this op finishes, all ops in `inputs` have finished. This op has no + output. + + Note: *In TensorFlow 2 with eager and/or Autograph, you should not require + this method, as ops execute in the expected order thanks to automatic control + dependencies.* Only use `tf.group` when working with v1 + `tf.Graph` code. 
+ + When operating in a v1-style graph context, ops are not executed in the same + order as specified in the code; TensorFlow will attempt to execute ops in + parallel or in an order convenient to the result it is computing. `tf.group` + allows you to request that one or more results finish before execution + continues. + + `tf.group` creates a single op (of type `NoOp`), and then adds appropriate + control dependencies. Thus, `c = tf.group(a, b)` will compute the same graph + as this: + + with tf.control_dependencies([a, b]): + c = tf.no_op() + + See also `tf.tuple` and + `tf.control_dependencies`. + + Args: + *inputs: Zero or more tensors to group. + name: A name for this operation (optional). + + Returns: + An Operation that executes all its inputs. + + Raises: + ValueError: If an unknown keyword argument is provided. + """ + if context.executing_eagerly(): + return None + name = kwargs.pop("name", None) + if kwargs: + raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys())) + with ops.name_scope(name, "group_deps", inputs) as name: + # Grouping no inputs means do nothing + if not inputs: + return no_op(name=name) + + # Sorts *inputs according to their devices. + ops_on_device = {} # device -> operations specified on the device. + for inp in nest.flatten(inputs, expand_composites=True): + if not hasattr(inp, "device"): + raise TypeError("'inputs' should be zero or more (nested) Tensors. " + f"Received '{inp}' with type '{type(inp)}'.") + dev = inp.device + if dev in ops_on_device: + ops_on_device[dev].append(inp) + else: + ops_on_device[dev] = [inp] + if len(ops_on_device) == 1: + # 1-level tree. The root node is the returned NoOp node. + (dev, deps), = ops_on_device.items() + return _GroupControlDeps(dev, deps, name=name) + + # 2-level tree. The root node is the returned NoOp node. + # deps contains 1 NoOp node for each device. 
+ deps = [] + + def device_key(dev): + """A sort key that allows None to be compared to strings.""" + return "" if dev is None else dev + + for dev in sorted(ops_on_device, key=device_key): + deps.append(_GroupControlDeps(dev, ops_on_device[dev])) + + with ops.control_dependencies(deps): + return no_op(name=name) + + +@tf_export("tuple", v1=[]) +@dispatch.add_dispatch_support +def tuple_v2(tensors, control_inputs=None, name=None): + """Groups tensors together. + + The returned tensors have the same value as the input tensors, but they + are computed only after all the input tensors have been computed. + + Note: *In TensorFlow 2 with eager and/or Autograph, you should not require + this method, as ops execute in the expected order thanks to automatic control + dependencies.* Only use `tf.tuple` when working with v1 `tf.Graph` code. + + See also `tf.group` and `tf.control_dependencies`. + + Example: + >>> with tf.Graph().as_default(): + ... with tf.compat.v1.Session() as sess: + ... v = tf.Variable(0.0) + ... a = tf.constant(1.0) + ... sess.run(tf.compat.v1.global_variables_initializer()) + ... for i in range(5): + ... update_op = v.assign_add(1.0) + ... b = a + v + ... res_b = sess.run(b) + ... res_v = sess.run(v) + ... print(res_v) + 0.0 + 0.0 + 0.0 + 0.0 + 0.0 + + >>> with tf.Graph().as_default(): + ... with tf.compat.v1.Session() as sess: + ... v = tf.Variable(0.0) + ... a = tf.constant(1.0) + ... sess.run(tf.compat.v1.global_variables_initializer()) + ... for i in range(5): + ... update_op = v.assign_add(1.0) + ... calc = [a + v] + ... # `tf.tuple` ensures `update_op` is run before `b` + ... b = tf.tuple(calc, [tf.group(update_op)]) + ... res_b = sess.run(b) + ... res_v = sess.run(v) + ... print(res_v) + 1.0 + 2.0 + 3.0 + 4.0 + 5.0 + + + Args: + tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. + control_inputs: List of additional ops to finish before returning. + name: (optional) A name to use as a `name_scope` for the operation. 
+ + Returns: + Same as `tensors`. + + Raises: + ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. + TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` + objects. + + """ + return tuple(tensors=tensors, name=name, control_inputs=control_inputs) # pylint: disable=redefined-builtin + + +@tf_export(v1=["tuple"]) +@dispatch.add_dispatch_support +def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined-builtin + """Group tensors together. + + This creates a tuple of tensors with the same values as the `tensors` + argument, except that the value of each tensor is only returned after the + values of all tensors have been computed. + + `control_inputs` contains additional ops that have to finish before this op + finishes, but whose outputs are not returned. + + This can be used as a "join" mechanism for parallel computations: all the + argument tensors can be computed in parallel, but the values of any tensor + returned by `tuple` are only available after all the parallel computations + are done. + + See also `tf.group` and + `tf.control_dependencies`. + + Args: + tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. + name: (optional) A name to use as a `name_scope` for the operation. + control_inputs: List of additional ops to finish before returning. + + Returns: + Same as `tensors`. + + Raises: + ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. + TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` + objects. 
+ + """ + if context.executing_eagerly(): + return tensors + with ops.name_scope(name, "tuple", tensors) as name: + tensors = [ + t if (isinstance(t, ops.Operation) or tensor_util.is_tf_type(t) or + t is None) else ops.convert_to_tensor(t) for t in tensors + ] + gating_ops = [ + t if isinstance(t, ops.Operation) else t.op + for t in tensors + if t is not None + ] + if control_inputs: + for c in control_inputs: + if isinstance(c, tensor_lib.Tensor): + c = c.op + elif not isinstance(c, ops.Operation): + raise TypeError( + "'control_inputs' must only contain Operation or Tensor. " + f"Received: {type(c)}") + gating_ops.append(c) + # Note that in order to ensure ordering in the pbtxt, we must take care to + # ensure the order here. + gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops. + if not gating_ops: + raise ValueError("'tensors' must have at least one Tensor. " + f"Received: {tensors}.") + gate = group(*gating_ops) + tpl = [] + for t in tensors: + if tensor_util.is_tf_type(t): + tpl.append(with_dependencies([gate], t)) + elif isinstance(t, ops.Operation): + with ops.control_dependencies([gate]): + tpl.append(group(t)) + else: + tpl.append(None) + return tpl + + +class XLAControlFlowContext(ControlFlowContext): + """Base class for XLA and TPU control flow contexts.""" + + def __init__(self): + super(XLAControlFlowContext, self).__init__() + self._name = "XLAControlFlowContext" + + def to_control_flow_context_def(self, context_def, export_scope=None): + # pylint: disable=useless-super-delegation + # NOTE(slebedev): the method is required by `ControlFlowContext`. + super(XLAControlFlowContext, + self).to_control_flow_context_def(context_def, export_scope) + + def IsXLAContext(self): + return True + + def AddOp(self, _): + pass + + def AddValue(self, x): + return x + + def RequiresUniqueFunctionRetracing(self): + """Returns whether the tf.function should be retraced if the context changes. 
+ """ + return False + + +@tf_export("__internal__.get_enclosing_xla_context", v1=[]) +def get_enclosing_xla_context(): + """Recursively find and return the XLAControlFlowContext.""" + graph = ops.get_default_graph() + while graph is not None: + # pylint: disable=protected-access + context_ = graph._get_control_flow_context() + # pylint: enable=protected-access + while context_ is not None: + if isinstance(context_, XLAControlFlowContext): + return context_ + context_ = context_.outer_context + # This may be a FuncGraph due to defuns or v2 control flow. We need to + # find the original graph with the XLAControlFlowContext. + graph = getattr(graph, "outer_graph", None) + return None + + +def from_control_flow_context_def(context_def, import_scope=None): + """Deserializes `context_def` into the appropriate ControlFlowContext. + + Args: + context_def: ControlFlowContextDef proto + import_scope: Optional `string`. Name scope to add. + + Returns: + A ControlFlowContext subclass + """ + if context_def.HasField("cond_ctxt"): + return CondContext.from_proto( + context_def.cond_ctxt, import_scope=import_scope) + if context_def.HasField("while_ctxt"): + return WhileContext.from_proto( + context_def.while_ctxt, import_scope=import_scope) + raise NotImplementedError("Unknown ControlFlowContextDef field: %s" % + context_def.WhichOneof("ctxt")) + + +ops.register_proto_function( + ops.GraphKeys.COND_CONTEXT, + proto_type=control_flow_pb2.CondContextDef, + to_proto=CondContext.to_proto, + from_proto=CondContext.from_proto) + +ops.register_proto_function( + ops.GraphKeys.WHILE_CONTEXT, + proto_type=control_flow_pb2.WhileContextDef, + to_proto=WhileContext.to_proto, + from_proto=WhileContext.from_proto) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_switch_case.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_switch_case.py new file mode 100644 index 
0000000000000000000000000000000000000000..843a088017df367122a4567683ed36f59dfc3a5b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_switch_case.py @@ -0,0 +1,253 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Switch case for Control Flow Operations.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond_v2 +from tensorflow.python.ops import control_flow_util as util +from tensorflow.python.ops import gen_functional_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.util.tf_export import tf_export + + +def _indexed_case_verify_and_canonicalize_args(branch_fns, default, + branch_index): + """Verifies input arguments for the case function. + + Args: + branch_fns: Dict or list of pairs of an `int` and a callable which returns a + list of tensors. + default: Optional callable that returns a list of tensors. + branch_index: Optional int `Tensor`, which selects for the corresponding + pred_fn_pair. + + Raises: + TypeError: If `branch_fns` is not a list/dictionary. + TypeError: If `branch_fns` is a list but does not contain 2-tuples or + callables. 
+ TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. + + Returns: + branch_fns: validated list of callables for each branch (default last). + """ + if not isinstance(branch_index, tensor.Tensor): + raise TypeError("'branch_index' must be a Tensor, got {}".format( + type(branch_index))) + if not branch_index.dtype.is_integer: + raise TypeError("'branch_index' must be an integer Tensor, got {}".format( + branch_index.dtype)) + + if not branch_fns: + raise ValueError("Must provide at least one item in 'branch_fns'") + if not isinstance(branch_fns, (list, tuple, dict)): + raise TypeError("'branch_fns' must be a list, tuple, or dict") + + if isinstance(branch_fns, dict): + branch_fns = branch_fns.items() + + if all(callable(fn) for fn in branch_fns): + branch_fns = list(enumerate(branch_fns)) + + for key_fn_pair in branch_fns: + if not isinstance(key_fn_pair, tuple) or len(key_fn_pair) != 2: + raise TypeError("Each entry in 'branch_fns' must be a 2-tuple. " + f"Received {key_fn_pair}.") + key, branch_fn = key_fn_pair + + if not isinstance(key, int): + raise TypeError("key must be a Python `int`, got {}".format(type(key))) + + if not callable(branch_fn): + raise TypeError("fn for key {} must be callable.".format(key)) + + keys = [p[0] for p in branch_fns] + if min(keys) < 0 or max(keys) >= len(keys) or len(set(keys)) != len(keys): + raise ValueError( + "branch indices (keys) must form contiguous range of [0 to {}) but " + "found {{{}}}".format(len(keys), ",".join(map(str, sorted(keys))))) + actions = [p[1] for p in sorted(branch_fns)] + if default is not None: + actions.append(default) + return actions + + +def _indexed_case_helper(branch_fns, + default, + branch_index, + name, + lower_using_switch_merge=None): + """Implementation of case that emits the n-way indexed Case op. + + Args: + branch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable + which returns a list of tensors. 
+ default: Optional callable that returns a list of tensors. + branch_index: Optional int `Tensor`, which selects for the corresponding + pred_fn_pair. + name: A name for this operation (optional). + lower_using_switch_merge: Lower this op using switch merge ops (optional). + + Returns: + The tensors returned by the pair whose key matched branch_index, or + those returned by `default` if none does. + + Raises: + TypeError: If `branch_fns` is not a list/dictionary. + TypeError: If `branch_fns` is a list but does not contain 2-tuples or + callables. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. + """ + branch_fns = _indexed_case_verify_and_canonicalize_args( + branch_fns, default, branch_index) + with ops.name_scope(name, "case", [branch_index]): + if context.executing_eagerly() and not hasattr(branch_index, "graph"): + branch_index = array_ops.where( + math_ops.less(branch_index, 0) + | math_ops.greater_equal(branch_index, len(branch_fns)), + len(branch_fns) - 1, branch_index) + return branch_fns[int(branch_index)]() + return cond_v2.indexed_case( + branch_index, + branch_fns, + lower_using_switch_merge=lower_using_switch_merge) + + +@tf_export("__internal__.execute_fn_for_device", v1=[]) +def execute_fn_for_device(device_branch_fns, default_fn, name="execute_fn"): + """Executes one of the provided callables based on the device placement. + + This API is used when the implementations for high level function depend on + the underlying device placement. It takes a dictionary of device type to + callables. The device type includes "CPU", "GPU", "TPU", etc. When the type of + the device where to run this op matches the key in 'device_branch_fns', + the corresponding callable is executed, falling back to 'default_fn' if none + matches. 
+ + **Example:** + ```python + def f1(): return tf.constant(1) + def f2(): return tf.constant(2) + r = tf.execute_fn_for_device({"CPU": f1, "GPU": f2}, default_fn=f1) + ``` + 'r' is evaluated as 1 when it runs on CPU, 2 running on GPU, 1 running on + any other device types. + + + Args: + device_branch_fns: a dictionary of device types to the callables. Each + callable must return a matching structure of tensors. + default_fn: fallback callable when the underlying device does not match any + key in the 'device_branch_fns'. + name: A name for this operation (optional). + + Returns: + The tensors returned by the callable identified by device type during + execution, or those returned by 'default_fn' if no key matches. + """ + # Always execute the default fn for XLA to avoid complicated graph by case op. + # see more discussions in b/167276293. + is_in_xla = util.GraphOrParentsInXlaContext(ops.get_default_graph()) + if is_in_xla: + return default_fn() + device_branch_fns_upper = {k.upper(): v for k, v in device_branch_fns.items()} + branch_fns = list(device_branch_fns_upper.values()) + devices = list(device_branch_fns_upper.keys()) + device_index = gen_functional_ops.device_index(device_names=devices) + return _indexed_case_helper( + branch_fns, + default_fn, + device_index, + name, + lower_using_switch_merge=False) + + +@tf_export("switch_case") +def switch_case(branch_index, branch_fns, default=None, name="switch_case"): + """Create a switch/case operation, i.e. + + an integer-indexed conditional. + + See also `tf.case`. + + This op can be substantially more efficient than `tf.case` when exactly one + branch will be selected. `tf.switch_case` is more like a C++ switch/case + statement than `tf.case`, which is more like an if/elif/elif/else chain. + + The `branch_fns` parameter is either a dict from `int` to callables, or list + of (`int`, callable) pairs, or simply a list of callables (in which case the + index is implicitly the key). 
The `branch_index` `Tensor` is used to select an + element in `branch_fns` with matching `int` key, falling back to `default` + if none match, or `max(keys)` if no `default` is provided. The keys must form + a contiguous set from `0` to `len(branch_fns) - 1`. + + `tf.switch_case` supports nested structures as implemented in `tf.nest`. All + callables must return the same (possibly nested) value structure of lists, + tuples, and/or named tuples. + + **Example:** + + Pseudocode: + + ```c++ + switch (branch_index) { // c-style switch + case 0: return 17; + case 1: return 31; + default: return -1; + } + ``` + or + ```python + branches = {0: lambda: 17, 1: lambda: 31} + branches.get(branch_index, lambda: -1)() + ``` + + Expressions: + + ```python + def f1(): return tf.constant(17) + def f2(): return tf.constant(31) + def f3(): return tf.constant(-1) + r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3) + # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3}) + ``` + + Args: + branch_index: An int Tensor specifying which of `branch_fns` should be + executed. + branch_fns: A `dict` mapping `int`s to callables, or a `list` of (`int`, + callable) pairs, or simply a list of callables (in which case the index + serves as the key). Each callable must return a matching structure of + tensors. + default: Optional callable that returns a structure of tensors. + name: A name for this operation (optional). + + Returns: + The tensors returned by the callable identified by `branch_index`, or those + returned by `default` if no key matches and `default` was provided, or those + returned by the max-keyed `branch_fn` if no `default` is provided. + + Raises: + TypeError: If `branch_fns` is not a list/dictionary. + TypeError: If `branch_fns` is a list but does not contain 2-tuples or + callables. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. 
+ """ + return _indexed_case_helper(branch_fns, default, branch_index, name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_func_graphs.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_func_graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..48bfac5239590789c2a5081cecf2fc6a003c5c97 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_func_graphs.py @@ -0,0 +1,56 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""FuncGraphs for V2 control flow.""" + +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import ops + + +class ControlFlowFuncGraph(func_graph.FuncGraph): + """Contains control flow-specific FuncGraph logic.""" + + def __init__(self, *args, **kwargs): + super(ControlFlowFuncGraph, self).__init__(*args, **kwargs) + outer_graph = self.outer_graph + # Unlike tf.function, control flow FuncGraphs are generally created one per + # op. This means hard-coding any outer device scopes in the body (rather + # than inspecting the call-time placement of the control flow op) makes + # sense. 
+ self._device_function_stack = outer_graph._device_function_stack.copy() # pylint: disable=protected-access + self.is_control_flow_graph = True + if ops.executing_eagerly_outside_functions(): + func_graph.override_func_graph_name_scope( + self, self.outer_graph.get_name_scope()) + + +class CondBranchFuncGraph(ControlFlowFuncGraph): + """FuncGraph for branches of tf.cond(). + + This is used to distinguish cond branches from other functions. + """ + + +class WhileCondFuncGraph(ControlFlowFuncGraph): + """FuncGraph for the condition of tf.while_loop(). + + This is used to distinguish while conditions from other functions. + """ + + +class WhileBodyFuncGraph(ControlFlowFuncGraph): + """FuncGraph for the body of tf.while_loop(). + + This is used to distinguish while bodies from other functions. + """ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_toggles.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_toggles.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce7d59bcdd69896ed047f0c7226ffd6cba17b93 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_v2_toggles.py @@ -0,0 +1,69 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""API for enabling v2 control flow.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["enable_control_flow_v2"]) +def enable_control_flow_v2(): # pylint: disable=invalid-name + """Use control flow v2. + + control flow v2 (cfv2) is an improved version of control flow in TensorFlow + with support for higher order derivatives. Enabling cfv2 will change the + graph/function representation of control flow, e.g., `tf.while_loop` and + `tf.cond` will generate functional `While` and `If` ops instead of low-level + `Switch`, `Merge` etc. ops. Note: Importing and running graphs exported + with old control flow will still be supported. + + Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0 + feature. + + Note: v2 control flow is always enabled inside of tf.function. Calling this + function is not required. + """ + # pylint: disable=protected-access + logging.vlog(1, "Enabling control flow v2") + ops._control_flow_api_gauge.get_cell().set(True) + control_flow_util.ENABLE_CONTROL_FLOW_V2 = True + + +@tf_export(v1=["disable_control_flow_v2"]) +def disable_control_flow_v2(): # pylint: disable=invalid-name + """Opts out of control flow v2. + + Note: v2 control flow is always enabled inside of tf.function. Calling this + function has no effect in that case. + + If your code needs tf.disable_control_flow_v2() to be called to work + properly please file a bug. 
+ """ + # pylint: disable=protected-access + logging.vlog(1, "Disabling control flow v2") + ops._control_flow_api_gauge.get_cell().set(False) + control_flow_util.ENABLE_CONTROL_FLOW_V2 = False + + +@tf_export(v1=["control_flow_v2_enabled"]) +def control_flow_v2_enabled(): # pylint: disable=invalid-name + """Returns `True` if v2 control flow is enabled. + + Note: v2 control flow is always enabled inside of tf.function. + """ + return control_flow_util.EnableControlFlowV2(ops.get_default_graph()) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/custom_gradient.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/custom_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..3fd5cad6947b31a39d9915ce2ba18cc17a985885 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/custom_gradient.py @@ -0,0 +1,823 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Decorator to overrides the gradient for a function.""" + +from tensorflow.python.eager import backprop +from tensorflow.python.eager import context +from tensorflow.python.eager import record +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import handle_data_util +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import op_selector +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import nest +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect +from tensorflow.python.util import variable_utils +from tensorflow.python.util.tf_export import tf_export + + +VAR_OP_TYPES = [ + "VariableV2", + "VarHandleOp", +] + + +@tf_export("custom_gradient") +def custom_gradient(f=None): + """Decorator to define a function with a custom gradient. + + This decorator allows fine grained control over the gradients of a sequence + for operations. This may be useful for multiple reasons, including providing + a more efficient or numerically stable gradient for a sequence of operations. + + For example, consider the following function that commonly occurs in the + computation of cross entropy and log likelihoods: + + ```python + def log1pexp(x): + return tf.math.log(1 + tf.exp(x)) + ``` + + Due to numerical instability, the gradient of this function evaluated at x=100 + is NaN. 
For example: + + ```python + with tf.GradientTape() as tape: + tape.watch(x) + y=log1pexp(x) + dy_dx = tape.gradient(y, x) # Will be NaN when evaluated. + ``` + + The gradient expression can be analytically simplified to provide numerical + stability: + + ```python + @tf.custom_gradient + def log1pexp(x): + e = tf.exp(x) + def grad(upstream): + return upstream * (1 - 1 / (1 + e)) + return tf.math.log(1 + e), grad + ``` + + With this definition, the gradient `dy_dx` at `x = 100` will be correctly + evaluated as 1.0. + + The variable `upstream` is defined as the upstream gradient. i.e. the gradient + from all the layers or functions originating from this layer. The above + example has no upstream functions, therefore `upstream = dy/dy = 1.0`. + + Assume that `x_i` is `log1pexp` in the forward pass `x_1 = x_1(x_0)`, + `x_2 = x_2(x_1)`, ..., `x_i = x_i(x_i-1)`, ..., `x_n = x_n(x_n-1)`. By + chain rule we know that `dx_n/dx_0 = dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... * + dx_i/dx_i-1 * ... * dx_1/dx_0`. + + In this case the gradient of our current function defined as + `dx_i/dx_i-1 = (exp(x_i) / (1 + exp(x_i))) = (1 - 1 / (1 + exp(x_i)))`. The + upstream gradient `upstream` would be `dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... * + dx_i+1/dx_i`. The upstream gradient multiplied by the current gradient is + then passed downstream. + + In case the function takes multiple variables as input, the `grad` + function must also return the same number of variables. + We take the function `z = x * y` as an example. + + >>> @tf.custom_gradient + ... def bar(x, y): + ... def grad(upstream): + ... dz_dx = y + ... dz_dy = x + ... return upstream * dz_dx, upstream * dz_dy + ... z = x * y + ... return z, grad + >>> x = tf.constant(2.0, dtype=tf.float32) + >>> y = tf.constant(3.0, dtype=tf.float32) + >>> with tf.GradientTape(persistent=True) as tape: + ... tape.watch(x) + ... tape.watch(y) + ... 
z = bar(x, y) + >>> z + + >>> tape.gradient(z, x) + + >>> tape.gradient(z, y) + + + Nesting custom gradients can lead to unintuitive results. The default + behavior does not correspond to n-th order derivatives. For example + + ```python + @tf.custom_gradient + def op(x): + y = op1(x) + @tf.custom_gradient + def grad_fn(dy): + gdy = op2(x, y, dy) + def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x. + return op3(x, y, dy, ddy) + return gdy, grad_grad_fn + return y, grad_fn + ``` + + The function `grad_grad_fn` will be calculating the first order gradient + of `grad_fn` with respect to `dy`, which is used to generate forward-mode + gradient graphs from backward-mode gradient graphs, but is not the same as + the second order gradient of `op` with respect to `x`. + + Instead, wrap nested `@tf.custom_gradients` in another function: + + ```python + @tf.custom_gradient + def op_with_fused_backprop(x): + y, x_grad = fused_op(x) + def first_order_gradient(dy): + @tf.custom_gradient + def first_order_custom(unused_x): + def second_order_and_transpose(ddy): + return second_order_for_x(...), gradient_wrt_dy(...) + return x_grad, second_order_and_transpose + return dy * first_order_custom(x) + return y, first_order_gradient + ``` + + Additional arguments to the inner `@tf.custom_gradient`-decorated function + control the expected return values of the innermost function. + + The examples above illustrate how to specify custom gradients for functions + which do not read from variables. The following example uses variables, which + require special handling because they are effectively inputs of the forward + function. + + >>> weights = tf.Variable(tf.ones([2])) # Trainable variable weights + >>> @tf.custom_gradient + ... def linear_poly(x): + ... # Creating polynomial + ... poly = weights[1] * x + weights[0] + ... + ... def grad_fn(dpoly, variables): + ... # dy/dx = weights[1] and we need to left multiply dpoly + ... 
grad_xs = dpoly * weights[1] # Scalar gradient + ... + ... grad_vars = [] # To store gradients of passed variables + ... assert variables is not None + ... assert len(variables) == 1 + ... assert variables[0] is weights + ... # Manually computing dy/dweights + ... dy_dw = dpoly * tf.stack([x ** 1, x ** 0]) + ... grad_vars.append( + ... tf.reduce_sum(tf.reshape(dy_dw, [2, -1]), axis=1) + ... ) + ... return grad_xs, grad_vars + ... return poly, grad_fn + >>> x = tf.constant([1., 2., 3.]) + >>> with tf.GradientTape(persistent=True) as tape: + ... tape.watch(x) + ... poly = linear_poly(x) + >>> poly # poly = x + 1 + + >>> tape.gradient(poly, x) # conventional scalar gradient dy/dx + + >>> tape.gradient(poly, weights) + + + Above example illustrates usage of trainable variable `weights`. + In the example, the inner `grad_fn` accepts an extra `variables` input + parameter and also returns an extra `grad_vars` output. That extra argument + is passed if the forward function reads any variables. You need to + compute the gradient w.r.t. each of those `variables` and output it as a list + of `grad_vars`. Note here that default value of `variables` is set to `None` + when no variables are used in the forward function. + + It should be noted `tf.GradientTape` is still watching the forward pass of a + `tf.custom_gradient`, and will use the ops it watches. As a consequence, + calling `tf.function` while the tape is still watching leads + to a gradient graph being built. If an op is used in `tf.function` without + registered gradient, a `LookupError` will be raised. + + Users can insert `tf.stop_gradient` to customize this behavior. This + is demonstrated in the example below. `tf.random.shuffle` does not have a + registered gradient. As a result `tf.stop_gradient` is used to avoid the + `LookupError`. 
+ + ```python + x = tf.constant([0.3, 0.5], dtype=tf.float32) + + @tf.custom_gradient + def test_func_with_stop_grad(x): + @tf.function + def _inner_func(): + # Avoid exception during the forward pass + return tf.stop_gradient(tf.random.shuffle(x)) + # return tf.random.shuffle(x) # This will raise + + res = _inner_func() + def grad(upstream): + return upstream # Arbitrarily defined custom gradient + return res, grad + + with tf.GradientTape() as g: + g.watch(x) + res = test_func_with_stop_grad(x) + + g.gradient(res, x) + ``` + + See also `tf.RegisterGradient` which registers a gradient function for a + primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows + for fine grained control over the gradient computation of a sequence of + operations. + + Note that if the decorated function uses `Variable`s, the enclosing variable + scope must be using + [ResourceVariables](https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables). + + Args: + f: function `f(*x)` that returns a tuple `(y, grad_fn)` where: - `x` is a + sequence of (nested structures of) `Tensor` inputs to the function. - `y` + is a (nested structure of) `Tensor` outputs of applying TensorFlow + operations in `f` to `x`. - `grad_fn` is a function with the signature + `g(*grad_ys)` which returns a list of `Tensor`s the same size as + (flattened) `x` - the derivatives of `Tensor`s in `y` with respect to the + `Tensor`s in `x`. `grad_ys` is a sequence of `Tensor`s the same size as + (flattened) `y` holding the initial value gradients for each `Tensor` in + `y`. In a pure mathematical sense, a vector-argument vector-valued + function `f`'s derivatives should be its Jacobian matrix `J`. Here we are + expressing the Jacobian `J` as a function `grad_fn` which defines how `J` + will transform a vector `grad_ys` when left-multiplied with it (`grad_ys * + J`, the vector-Jacobian product, or VJP). 
This functional representation + of a matrix is convenient to use for chain-rule calculation (in e.g. the + back-propagation algorithm). If `f` uses `Variable`s (that are not part + of the inputs), i.e. through `get_variable`, then `grad_fn` should have + signature `g(*grad_ys, variables=None)`, where `variables` is a list of + the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where + `grad_xs` is the same as above, and `grad_vars` is a `list` with + the derivatives of `Tensor`s in `y` with respect to the variables (that + is, grad_vars has one Tensor per variable in variables). + + Returns: + A function `h(x)` which returns the same value as `f(x)[0]` and whose + gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`. + """ + + if f is None: + return lambda f: custom_gradient(f=f) + + @Bind.decorator + def decorated(wrapped, args, kwargs): + """Decorated function with custom gradient.""" + if context.executing_eagerly(): + return _eager_mode_decorator(wrapped, args, kwargs) + else: + return _graph_mode_decorator(wrapped, args, kwargs) + + return tf_decorator.make_decorator(f, decorated(f)) # pylint: disable=no-value-for-parameter + + +class Bind: + """When called evaluates `d(f, args, kwargs)` but supports binding `f`. + + >>> @Bind.decorator + ... def my_decorator(f, args, kwargs): + ... print("my_decorator called with", args, kwargs) + ... return f(*args, **kwargs) + + >>> class Foo: + ... @my_decorator + ... def bar(self, a, b, c): + ... 
return a * b * c + + >>> Foo.bar(None, 1, 2, c=3) + my_decorator called with (None, 1, 2) {'c': 3} + 6 + + >>> foo = Foo() + >>> foo.bar(1, 2, c=3) + my_decorator called with (1, 2) {'c': 3} + 6 + """ + + @classmethod + def decorator(cls, d): + return lambda f: Bind(f, d) + + def __init__(self, f, d): + self._f = f + self._d = d + + def __get__(self, instance, owner): + if instance is not None: + f = self._f.__get__(instance, owner) + return tf_decorator.make_decorator(f, Bind(f, self._d)) + else: + return self + + def __call__(self, *a, **k): + return self._d(self._f, a, k) + + +def get_variable_by_name(var_name): + """Given a variable name, retrieves a handle on the tensorflow Variable.""" + global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) + + def _filter_fn(item): + try: + return var_name == item.op.name + except AttributeError: + # Collection items without operation are ignored. + return False + + candidate_vars = list(filter(_filter_fn, global_vars)) + + if len(candidate_vars) >= 1: + # Filter out non-trainable variables. + candidate_vars = [v for v in candidate_vars if v.trainable] + else: + raise ValueError("Unsuccessful at finding variable {}.".format(var_name)) + + if len(candidate_vars) == 1: + return candidate_vars[0] + elif len(candidate_vars) > 1: + raise ValueError( + "Unsuccessful at finding trainable variable {}. " + "Number of candidates: {}. " + "Candidates: {}".format(var_name, len(candidate_vars), candidate_vars)) + else: + # The variable is not trainable. + return None + + +def _get_dependent_variables(input_ops, output_ops): + """Finds variables involved in the subgraph between input_ops and output_ops. + + Args: + input_ops: Flattened list of input ops + output_ops: Flattened list of output ops + + Returns: + A list of variables + """ + + # avoids the edge-case when input_ops == output_ops. 
+ output_ops = nest.map_structure(gen_array_ops.identity, output_ops) + inbetween_ops = op_selector.get_backward_walk_ops( + seed_ops=output_ops, + stop_at_ts=input_ops, + inclusive=False, + only_differentiable=True) + var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES) + var_names = (op.name for op in var_ops) + tf_vars = (get_variable_by_name(var_name) for var_name in var_names) + tf_vars = [v for v in tf_vars if v is not None] + return tf_vars + + +def generate_name(): + return "CustomGradient-%s" % ops.uid() + + +def _graph_mode_decorator(f, args, kwargs): + """Implement custom gradient decorator for graph mode.""" + # TODO(rsepassi): Add support for kwargs + if kwargs: + raise ValueError( + "The custom_gradient decorator currently supports keywords " + "arguments only when eager execution is enabled.") + name = generate_name() + args = variable_utils.convert_variables_to_tensors(args) + args = nest.map_structure(ops.convert_to_tensor, args, expand_composites=True) + + # Checking global and local variables attempts to ensure that no non-resource + # Variables are added to the graph. 
+ current_var_scope = variable_scope.get_variable_scope() + before_vars = set([ + v.ref() for v in current_var_scope.global_variables() + + current_var_scope.local_variables() + ]) + with record.VariableWatcher() as variable_watcher: + result, grad_fn = f(*args) + + flat_args = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(args)) + flat_result = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(result)) + flat_result_len = len(flat_result) + + after_vars = set([ + v.ref() for v in current_var_scope.global_variables() + + current_var_scope.local_variables() + ]) + new_vars = after_vars - before_vars + new_vars_list = [v.deref() for v in new_vars] + for v in new_vars_list: + if not resource_variable_ops.is_resource_variable(v): + raise TypeError( + "All variables used by a function wrapped with @custom_gradient must " + "be `ResourceVariable`s. Ensure that no `variable_scope` is created " + "with `use_resource=False`.") + + # The variables that grad_fn needs to return gradients for are the set of + # variables used that are *not* part of the inputs. + variables_in_tape = frozenset([ + v.ref() for v in variable_watcher.watched_variables() + ]) + + graphs = {getattr(o, "graph", None) for o in flat_result} + # Not all results may be tensors. 
However, we want to ensure all tensor + # outputs are from the same graph and get a list of captured inputs for + # variable search + graphs.discard(None) # Discard non-graph outputs + if graphs: + if len(graphs) > 1: + raise ValueError( + "All custom_gradient outputs should be from the same graph") + output_graph = graphs.pop() + filtered_input_tensors = [] + for i in flat_args: + if i.graph == output_graph: + filtered_input_tensors.append(i) + else: + filtered_input_tensors = flat_args + + variables_in_subgraph = frozenset([ + v.ref() for v in _get_dependent_variables( + input_ops=filtered_input_tensors, output_ops=flat_result) + ]) + variables = sorted( + [v.deref() for v in variables_in_subgraph.union(variables_in_tape)], + key=lambda v: v.name) + + grad_argspec = tf_inspect.getfullargspec(grad_fn) + variables_in_signature = ("variables" in grad_argspec.args or + "variables" in grad_argspec.kwonlyargs or + grad_argspec.varkw) + if variables and not variables_in_signature: + raise TypeError( + "@tf.custom_gradient grad_fn must accept keyword argument 'variables', " + "since function uses variables: {}".format(variables)) + if variables_in_signature and not variables: + # User seems to intend to use variables but none were captured. 
+ logging.vlog( + 1, "@custom_gradient grad_fn has 'variables' in signature, " + "but no ResourceVariables were used on the forward pass.") + + all_tensors = flat_result + flat_args + variables + + def tape_grad_fn(*result_grad_components): + """Custom grad fn wrapper.""" + result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients( + nest.flatten(result), result_grad_components[:flat_result_len]) + if not isinstance(result_grads, (list, tuple)): + result_grads = [result_grads] + + if variables: + input_grads, variable_grads = grad_fn(*result_grads, variables=variables) + if len(variable_grads) != len(variables): + raise ValueError("Must return gradient for each variable from " + "@custom_gradient grad_fn.") + else: + input_grads = grad_fn(*result_grads) + variable_grads = [] + + # Need to return one value per input to the IdentityN, so pad the + # gradients of the inputs of the custom_gradient function with the + # gradients of the outputs as well. + input_grads = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(input_grads)) + return ([None] * flat_result_len) + input_grads + variable_grads + + @ops.RegisterGradient(name) + def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable + """Custom grad fn wrapper.""" + return tape_grad_fn(*result_grads) + + original_tensors = all_tensors + with ops.get_default_graph().gradient_override_map({"IdentityN": name}): + all_tensors = array_ops.identity_n(all_tensors) + + original_tensors = [ops.convert_to_tensor(x) for x in original_tensors] + + # Propagate handle data for happier shape inference for resource variables. 
+ for i, t in enumerate(original_tensors): + if t.dtype == dtypes.resource and hasattr(t, "_handle_data"): + all_tensors[i]._handle_data = t._handle_data # pylint: disable=protected-access + record.record_operation( + f.__name__, all_tensors, original_tensors, tape_grad_fn) + for ot, t in zip(original_tensors, all_tensors): + handle_data_util.copy_handle_data(ot, t) + flat_result = composite_tensor_gradient.replace_flat_tensors_for_gradients( + nest.flatten(result), all_tensors[:flat_result_len]) + return nest.pack_sequence_as(result, flat_result) + + +def _eager_mode_decorator(f, args, kwargs): + """Implement custom gradient decorator for eager mode.""" + with record.VariableWatcher() as variable_watcher: + result, grad_fn = f(*args, **kwargs) + flat_args = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(args)) + flat_kwargs = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(kwargs)) + all_inputs = flat_args + flat_kwargs + # The variables that grad_fn needs to return gradients for are the set of + # variables used that are *not* part of the inputs. + variables = [ + v.deref() # pylint: disable=g-complex-comprehension + for v in set(v.ref() for v in variable_watcher.watched_variables()) + if all(v.deref() is not i for i in all_inputs) + ] + grad_argspec = tf_inspect.getfullargspec(grad_fn) + if (variables and ("variables" not in grad_argspec.args) and + ("variables" not in grad_argspec.kwonlyargs) and + not grad_argspec.varkw): + raise TypeError( + "@tf.custom_gradient grad_fn must accept keyword argument 'variables', " + "since function uses variables: {}".format(variables)) + flat_result = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(result)) + # TODO(apassos) consider removing the identity below. 
+ flat_result = [gen_array_ops.identity(x) for x in flat_result] + + input_tensors = [ + ops.convert_to_tensor(x) for x in flat_args + list(variables)] + + recorded_inputs = input_tensors + arg_count = len(flat_args) + + def actual_grad_fn(*result_grad_components): + """Custom grad fn wrapper.""" + result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients( + nest.flatten(result), result_grad_components) + if not isinstance(result_grads, (list, tuple)): + result_grads = [result_grads] + + if variables: + input_grads, variable_grads = grad_fn(*result_grads, variables=variables) + if len(variable_grads) != len(variables): + raise ValueError("Must return gradient for each variable from " + "@custom_gradient grad_fn.") + else: + input_grads = grad_fn(*result_grads) + variable_grads = [] + flat_grads = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(input_grads)) + if len(flat_grads) != arg_count: + raise ValueError( + f"custom_gradient function expected to return {arg_count} " + f"gradients, but returned {len(flat_grads)} instead.") + return flat_grads + variable_grads + + record.record_operation(f.__name__, flat_result, recorded_inputs, + actual_grad_fn) + flat_result = composite_tensor_gradient.replace_flat_tensors_for_gradients( + nest.flatten(result), flat_result) + return nest.pack_sequence_as(result, flat_result) + + +@tf_export("recompute_grad") +def recompute_grad(f): + """Defines a function as a recompute-checkpoint for the tape auto-diff. + + Tape checkpointing is a technique to reduce the memory consumption of the + auto-diff tape: + + - Without tape checkpointing operations and intermediate values are + recorded to the tape for use in the backward pass. + + - With tape checkpointing, only the function call and its inputs are + recorded. During back-propagation the `recompute_grad` custom gradient + (`tf.custom_gradient`) recomputes the function under a localized Tape object. 
+ This recomputation of the function during backpropagation performs redundant + calculation, but reduces the overall memory usage of the Tape. + + >>> y = tf.Variable(1.0) + + >>> def my_function(x): + ... tf.print('running') + ... z = x*y + ... return z + + >>> my_function_recompute = tf.recompute_grad(my_function) + + >>> with tf.GradientTape() as tape: + ... r = tf.constant(1.0) + ... for i in range(4): + ... r = my_function_recompute(r) + running + running + running + running + + >>> grad = tape.gradient(r, [y]) + running + running + running + running + + Without `recompute_grad`, the tape contains all intermitate steps, and no + recomputation is performed. + + >>> with tf.GradientTape() as tape: + ... r = tf.constant(1.0) + ... for i in range(4): + ... r = my_function(r) + running + running + running + running + + >>> grad = tape.gradient(r, [y]) + + + If `f` was a `tf.keras` `Model` or `Layer` object, methods and attributes + such as `f.variables` are not available on the returned function `g`. + Either keep a reference of `f` , or use `g.__wrapped__` for accessing + these variables and methods. + + + >>> def print_running_and_return(x): + ... tf.print("running") + ... return x + + >>> model = tf.keras.Sequential([ + ... tf.keras.layers.Lambda(print_running_and_return), + ... tf.keras.layers.Dense(2) + ... ]) + + >>> model_recompute = tf.recompute_grad(model) + + >>> with tf.GradientTape(persistent=True) as tape: + ... r = tf.constant([[1,2]]) + ... for i in range(4): + ... r = model_recompute(r) + running + running + running + running + + >>> grad = tape.gradient(r, model.variables) + running + running + running + running + + Alternatively, use the `__wrapped__` attribute to access the original + model object. + + >>> grad = tape.gradient(r, model_recompute.__wrapped__.variables) + running + running + running + running + + + Args: + f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs. 
+ + Returns: + A function `g` wrapping `f` that defines a custom gradient, which recomputes + `f` on the backwards pass of a gradient call. + """ + # TODO(cdfreeman) Add is_recomputing functionality from graph mode version + + @custom_gradient + def inner(*args, **kwargs): + """Inner function closure for calculating gradients.""" + current_var_scope = variable_scope.get_variable_scope() + with record.stop_recording(): + result = f(*args, **kwargs) + + def grad_wrapper(*wrapper_args, variables=None): + """Wrapper function to accomodate lack of kwargs in graph mode custom_gradient.""" + + @custom_gradient + def inner_recompute_grad(*dresult): + """Nested custom gradient function for computing grads in reverse and forward mode autodiff.""" + # Gradient calculation for reverse mode autodiff. + with backprop.GradientTape() as t: + id_args = nest.map_structure(gen_array_ops.identity, args) + # Tuple `dresult` should contain at least one tensor. + assert len(dresult) >= 1 + + if not context.executing_eagerly(): + # XLA doesn't respect `tf.control_dependencies`. The code block + # below manually adds a data dependency to `dresult` to ensure + # recomputation of `f(*args, **kwargs)` happens after `dresult`. + + # This works even if `dresult[0]` is a size 0 tensor as reduce_max + # of a size 0 tensor returns -inf. Use reshape here to avoid reading + # the entire `dresult[0]`. + elem = math_ops.reduce_max(array_ops.reshape(dresult[0], [-1])[:1]) + # Cast elem to bool in case elem is NaN. 
+ elem_bool = math_ops.cast(elem, dtypes.bool) + dresult_dep = array_ops.where_v2( + elem_bool == elem_bool, 0., float("nan")) # pylint: disable=comparison-with-itself + id_args = nest.map_structure( + lambda x: x + math_ops.cast(dresult_dep, x.dtype), id_args) + + t.watch(id_args) + if variables is not None: + t.watch(variables) + with variable_scope.variable_scope(current_var_scope): + recomputed_result = f(*id_args, **kwargs) + kw_vars = [] + if variables is not None: + kw_vars = list(variables) + grads = t.gradient( + recomputed_result, + list(id_args) + kw_vars, + output_gradients=dresult, + unconnected_gradients=UnconnectedGradients.ZERO) + + def transpose(*t_args, **t_kwargs): + """Gradient function calculation for forward mode autodiff.""" + # Just throw an error since gradients / activations are not stored on + # tape for recompute. + raise NotImplementedError( + "recompute_grad tried to transpose grad of {}. " + "Consider not using recompute_grad in forward mode" + "autodiff".format(f.__name__)) + + return (grads[:len(id_args)], grads[len(id_args):]), transpose + + return inner_recompute_grad(*wrapper_args) + + return result, grad_wrapper + + return tf_decorator.make_decorator(f, inner) + + +@tf_export("grad_pass_through") +def grad_pass_through(f): + """Creates a grad-pass-through op with the forward behavior provided in f. + + Use this function to wrap any op, maintaining its behavior in the forward + pass, but replacing the original op in the backward graph with an identity. 
+ For example: + + ```python + x = tf.Variable(1.0, name="x") + z = tf.Variable(3.0, name="z") + + with tf.GradientTape() as tape: + # y will evaluate to 9.0 + y = tf.grad_pass_through(x.assign)(z**2) + # grads will evaluate to 6.0 + grads = tape.gradient(y, z) + ``` + + Another example is a 'differentiable' moving average approximation, where + gradients are allowed to flow into the last value fed to the moving average, + but the moving average is still used for the forward pass: + + ```python + x = ... # Some scalar value + # A moving average object, we don't need to know how this is implemented + moving_average = MovingAverage() + with backprop.GradientTape() as tape: + # mavg_x will evaluate to the current running average value + mavg_x = tf.grad_pass_through(moving_average)(x) + grads = tape.gradient(mavg_x, x) # grads will evaluate to 1.0 + ``` + + Args: + f: function `f(*x)` that returns a `Tensor` or nested structure of `Tensor` + outputs. + + Returns: + A function `h(x)` which returns the same values as `f(x)` and whose + gradients are the same as those of an identity function. + """ + @custom_gradient + def _grad_pass_through_op(*args, **kwargs): + def grad(*args, **kwargs): + variables = kwargs.get("variables") + if variables is not None: + # Variables involved in the wrapped op will not receive gradients. + return args, [None] * len(variables) + return args + return f(*args, **kwargs), grad + return tf_decorator.make_decorator(f, _grad_pass_through_op) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/default_gradient.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/default_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae3219578feb69242896ec52938a9315a8beaa7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/default_gradient.py @@ -0,0 +1,80 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for computing default gradients.""" +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import resource_variable_ops + + +def get_zeros_dtype(t): + """Return the dtype for the default gradient for a Tensor.""" + if t.dtype == dtypes.resource: + handle_data = resource_variable_ops.get_eager_safe_handle_data(t) + if (handle_data is None or not handle_data.is_set or + len(handle_data.shape_and_type) != 1): + raise ValueError("Internal error: Tried to take gradients (or similar) " + "of a variable without handle data:\n%s" % str(t)) + return handle_data.shape_and_type[0].dtype + return t.dtype + + +def shape_and_dtype(t): + """Return the shape and dtype for the default gradient for a Tensor.""" + if t.dtype == dtypes.resource: + handle_data = resource_variable_ops.get_eager_safe_handle_data(t) + if (handle_data is None or not handle_data.is_set or + len(handle_data.shape_and_type) != 1): + raise ValueError("Internal error: Tried to take gradients (or similar) " + "of a variable without handle data:\n%s" % str(t)) + shape_and_type = handle_data.shape_and_type[0] + return (tensor_shape.TensorShape(shape_and_type.shape), + dtypes.as_dtype(shape_and_type.dtype)) + return t.shape, t.dtype + + +def zeros_like(t): + 
"""Like array_ops.zeros_like, but respects resource handles.""" + if t.dtype == dtypes.resource: + return array_ops.zeros(*shape_and_dtype(t)) + else: + return array_ops.zeros_like(t) + + +def ones_like(t): + """Like array_ops.ones_like, but respects resource handles.""" + if t.dtype == dtypes.resource: + return array_ops.ones(*shape_and_dtype(t)) + else: + return array_ops.ones_like(t) + + +def supports_default_grad(t): + """Whether tensor `t` supports creating a default gradient. + + This function assumes that `t` is of a trainable type. + + Args: + t: Tensor + + Returns: + Bool + """ + if t.dtype == dtypes.resource: + handle_data = resource_variable_ops.get_eager_safe_handle_data(t) + if (handle_data is None or not handle_data.is_set or + len(handle_data.shape_and_type) != 1): + return False + return True diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/filesystem_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/filesystem_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d30c24d1df9f01e83a8e351e2649c20d143606cd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/filesystem_ops.py @@ -0,0 +1,38 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Filesystem related operations.""" + +from tensorflow.python.ops import gen_filesystem_ops as _gen_filesystem_ops + + +# pylint: disable=protected-access +def filesystem_set_configuration(scheme, key, value, name=None): + """Set configuration of the file system. + + Args: + scheme: File system scheme. + key: The name of the configuration option. + value: The value of the configuration option. + name: A name for the operation (optional). + + Returns: + None. + """ + + return _gen_filesystem_ops.file_system_set_configuration( + scheme, key=key, value=value, name=name) + + +# pylint: enable=protected-access diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_batch_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_batch_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..846f568a686781a3bfa1884e356a0de48879f288 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_batch_ops.py @@ -0,0 +1,699 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated +_BatchOutput = collections.namedtuple( + "Batch", + ["batched_tensors", "batch_index", "id"]) + + +def batch(in_tensors, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, grad_timeout_micros: int, max_enqueued_batches:int=10, allowed_batch_sizes=[], container:str="", shared_name:str="", batching_queue:str="", name=None): + r"""Batches all input tensors nondeterministically. + + When many instances of this Op are being run concurrently with the same + container/shared_name in the same device, some will output zero-shaped Tensors + and others will output Tensors of size up to max_batch_size. + + All Tensors in in_tensors are batched together (so, for example, labels and + features should be batched with a single instance of this operation. + + Each invocation of batch emits an `id` scalar which will be used to identify + this particular invocation when doing unbatch or its gradient. 
+ + Each op which emits a non-empty batch will also emit a non-empty batch_index + Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, + start, and length of elements of each set of Tensors present in batched_tensors. + + Batched tensors are concatenated along the first dimension, and all tensors in + in_tensors must have the first dimension of the same size. + + in_tensors: The tensors to be batched. + num_batch_threads: Number of scheduling threads for processing batches of work. + Determines the number of batches processed in parallel. + max_batch_size: Batch sizes will never be bigger than this. + batch_timeout_micros: Maximum number of microseconds to wait before outputting + an incomplete batch. + allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does + nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + batches up to one of those sizes. The entries must increase monotonically, and + the final entry must equal max_batch_size. + grad_timeout_micros: The timeout to use for the gradient. See Unbatch. + batched_tensors: Either empty tensors or a batch of concatenated Tensors. + batch_index: If out_tensors is non-empty, has information to invert it. + container: Controls the scope of sharing of this batch. + id: always contains a scalar with a unique ID for this invocation of Batch. + shared_name: Concurrently running instances of batch in the same device with the + same container and shared_name will batch their elements together. If left + empty, the op name will be used as the shared name. + T: the types of tensors to be batched. + + Args: + in_tensors: A list of `Tensor` objects. + num_batch_threads: An `int`. + max_batch_size: An `int`. + batch_timeout_micros: An `int`. + grad_timeout_micros: An `int`. + max_enqueued_batches: An optional `int`. Defaults to `10`. + allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`. + container: An optional `string`. Defaults to `""`. 
+ shared_name: An optional `string`. Defaults to `""`. + batching_queue: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (batched_tensors, batch_index, id). + + batched_tensors: A list of `Tensor` objects. Has the same type as `in_tensors`. + batch_index: A `Tensor` of type `int64`. + id: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Batch", name, in_tensors, "num_batch_threads", + num_batch_threads, "max_batch_size", max_batch_size, + "max_enqueued_batches", max_enqueued_batches, "batch_timeout_micros", + batch_timeout_micros, "allowed_batch_sizes", allowed_batch_sizes, + "grad_timeout_micros", grad_timeout_micros, "container", container, + "shared_name", shared_name, "batching_queue", batching_queue) + _result = _BatchOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_eager_fallback( + in_tensors, num_batch_threads=num_batch_threads, + max_batch_size=max_batch_size, + max_enqueued_batches=max_enqueued_batches, + batch_timeout_micros=batch_timeout_micros, + allowed_batch_sizes=allowed_batch_sizes, + grad_timeout_micros=grad_timeout_micros, container=container, + shared_name=shared_name, batching_queue=batching_queue, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads") + max_batch_size = _execute.make_int(max_batch_size, "max_batch_size") + batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros") + grad_timeout_micros = _execute.make_int(grad_timeout_micros, "grad_timeout_micros") + if max_enqueued_batches is None: + max_enqueued_batches = 10 + max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches") + if allowed_batch_sizes is None: + allowed_batch_sizes = [] + if not isinstance(allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'allowed_batch_sizes' argument to " + "'batch' Op, not %r." % allowed_batch_sizes) + allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes] + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if batching_queue is None: + batching_queue = "" + batching_queue = _execute.make_str(batching_queue, "batching_queue") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Batch", in_tensors=in_tensors, num_batch_threads=num_batch_threads, + max_batch_size=max_batch_size, + batch_timeout_micros=batch_timeout_micros, + grad_timeout_micros=grad_timeout_micros, + max_enqueued_batches=max_enqueued_batches, + allowed_batch_sizes=allowed_batch_sizes, container=container, + shared_name=shared_name, batching_queue=batching_queue, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_batch_threads", _op._get_attr_int("num_batch_threads"), + "max_batch_size", _op._get_attr_int("max_batch_size"), + "max_enqueued_batches", + _op._get_attr_int("max_enqueued_batches"), + "batch_timeout_micros", + _op._get_attr_int("batch_timeout_micros"), + "allowed_batch_sizes", _op.get_attr("allowed_batch_sizes"), + "grad_timeout_micros", 
_op._get_attr_int("grad_timeout_micros"), + "container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name"), "batching_queue", + _op.get_attr("batching_queue"), "T", _op.get_attr("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Batch", _inputs_flat, _attrs, _result) + _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):] + _result = _BatchOutput._make(_result) + return _result + +Batch = tf_export("raw_ops.Batch")(_ops.to_raw_op(batch)) + + +def batch_eager_fallback(in_tensors, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, grad_timeout_micros: int, max_enqueued_batches: int, allowed_batch_sizes, container: str, shared_name: str, batching_queue: str, name, ctx): + num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads") + max_batch_size = _execute.make_int(max_batch_size, "max_batch_size") + batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros") + grad_timeout_micros = _execute.make_int(grad_timeout_micros, "grad_timeout_micros") + if max_enqueued_batches is None: + max_enqueued_batches = 10 + max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches") + if allowed_batch_sizes is None: + allowed_batch_sizes = [] + if not isinstance(allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'allowed_batch_sizes' argument to " + "'batch' Op, not %r." 
% allowed_batch_sizes) + allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes] + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if batching_queue is None: + batching_queue = "" + batching_queue = _execute.make_str(batching_queue, "batching_queue") + _attr_T, in_tensors = _execute.convert_to_mixed_eager_tensors(in_tensors, ctx) + _inputs_flat = list(in_tensors) + _attrs = ("num_batch_threads", num_batch_threads, "max_batch_size", + max_batch_size, "max_enqueued_batches", max_enqueued_batches, + "batch_timeout_micros", batch_timeout_micros, "allowed_batch_sizes", + allowed_batch_sizes, "grad_timeout_micros", grad_timeout_micros, + "container", container, "shared_name", shared_name, "batching_queue", + batching_queue, "T", _attr_T) + _result = _execute.execute(b"Batch", len(in_tensors) + 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Batch", _inputs_flat, _attrs, _result) + _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):] + _result = _BatchOutput._make(_result) + return _result + + +def batch_function(in_tensors, captured_tensors, f, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, Tout, max_enqueued_batches:int=10, allowed_batch_sizes=[], container:str="", shared_name:str="", batching_queue:str="", low_priority_max_batch_size:int=0, low_priority_batch_timeout_micros:int=0, low_priority_allowed_batch_sizes=[], low_priority_max_enqueued_batches:int=0, enable_large_batch_splitting:bool=False, name=None): + r"""Batches all the inputs tensors to the computation done by the function. + + So, for example, in the following code + + ```python + + # This input will be captured. 
+ y = tf.placeholder_with_default(1.0, shape=[]) + + @tf.Defun(tf.float32) + def computation(a): + return tf.matmul(a, a) + y + + b = gen_batch_ops.batch_function( + f=computation + in_tensors=[a], + captured_tensors=computation.captured_inputs, + Tout=[o.type for o in computation.definition.signature.output_arg], + num_batch_threads=1, + max_batch_size=10, + batch_timeout_micros=100000, # 100ms + allowed_batch_sizes=[3, 10], + batching_queue="") + ``` + + If more than one session.run call is simultaneously trying to compute `b` + the values of `a` will be gathered, non-deterministically concatenated + along the first axis, and only one thread will run the computation. + + Assumes that all arguments of the function are Tensors which will be batched + along their first dimension. + + Arguments that are captured, are not batched. The session.run call which does + the concatenation, will use the values of the captured tensors available to it. + Therefore, typical uses of captured tensors should involve values which remain + unchanged across session.run calls. Inference is a good example of this. + + SparseTensor is not supported. The return value of the decorated function + must be a Tensor or a list/tuple of Tensors. + + Args: + in_tensors: A list of `Tensor` objects. The tensors to be batched. + captured_tensors: A list of `Tensor` objects. + The tensors which are captured in the function, and don't need + to be batched. + f: A function decorated with @Defun. + num_batch_threads: An `int`. + Number of scheduling threads for processing batches of work. + Determines the number of batches processed in parallel. + max_batch_size: An `int`. Batch sizes will never be bigger than this. + batch_timeout_micros: An `int`. + Maximum number of microseconds to wait before outputting + an incomplete batch. + Tout: A list of `tf.DTypes` that has length `>= 1`. + the types of the output tensors. + max_enqueued_batches: An optional `int`. Defaults to `10`. 
+ Maximum number of batches enqueued. Default: 10. + allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`. + Optional list of allowed batch sizes. If left empty, does + nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + batches up to one of those sizes. The entries must increase monotonically. + If enable_large_batch_splitting is false (i.e., large-input-split is not + enabled) the final entry must equal max_batch_size. + container: An optional `string`. Defaults to `""`. + Controls the scope of sharing of this batch. + shared_name: An optional `string`. Defaults to `""`. + Concurrently running instances of batch in the same device with the + same container and shared_name will batch their elements together. If left + empty, the op name will be used as the shared name. + batching_queue: An optional `string`. Defaults to `""`. + low_priority_max_batch_size: An optional `int`. Defaults to `0`. + low_priority_batch_timeout_micros: An optional `int`. Defaults to `0`. + low_priority_allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`. + low_priority_max_enqueued_batches: An optional `int`. Defaults to `0`. + enable_large_batch_splitting: An optional `bool`. Defaults to `False`. + input with a large size (i.e., larger than the largest value of + `allowed_batch_sizes`) will be splitted into multiple batches with batch size. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchFunction", name, in_tensors, captured_tensors, "f", f, + "num_batch_threads", num_batch_threads, "max_batch_size", + max_batch_size, "batch_timeout_micros", batch_timeout_micros, + "max_enqueued_batches", max_enqueued_batches, "allowed_batch_sizes", + allowed_batch_sizes, "container", container, "shared_name", + shared_name, "batching_queue", batching_queue, + "low_priority_max_batch_size", low_priority_max_batch_size, + "low_priority_batch_timeout_micros", + low_priority_batch_timeout_micros, "low_priority_allowed_batch_sizes", + low_priority_allowed_batch_sizes, "low_priority_max_enqueued_batches", + low_priority_max_enqueued_batches, "Tout", Tout, + "enable_large_batch_splitting", enable_large_batch_splitting) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_function_eager_fallback( + in_tensors, captured_tensors, f=f, + num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, + batch_timeout_micros=batch_timeout_micros, + max_enqueued_batches=max_enqueued_batches, + allowed_batch_sizes=allowed_batch_sizes, container=container, + shared_name=shared_name, batching_queue=batching_queue, + low_priority_max_batch_size=low_priority_max_batch_size, + low_priority_batch_timeout_micros=low_priority_batch_timeout_micros, + low_priority_allowed_batch_sizes=low_priority_allowed_batch_sizes, + low_priority_max_enqueued_batches=low_priority_max_enqueued_batches, + Tout=Tout, + enable_large_batch_splitting=enable_large_batch_splitting, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads") + max_batch_size = _execute.make_int(max_batch_size, "max_batch_size") + batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros") + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'batch_function' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + if max_enqueued_batches is None: + max_enqueued_batches = 10 + max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches") + if allowed_batch_sizes is None: + allowed_batch_sizes = [] + if not isinstance(allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'allowed_batch_sizes' argument to " + "'batch_function' Op, not %r." % allowed_batch_sizes) + allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes] + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if batching_queue is None: + batching_queue = "" + batching_queue = _execute.make_str(batching_queue, "batching_queue") + if low_priority_max_batch_size is None: + low_priority_max_batch_size = 0 + low_priority_max_batch_size = _execute.make_int(low_priority_max_batch_size, "low_priority_max_batch_size") + if low_priority_batch_timeout_micros is None: + low_priority_batch_timeout_micros = 0 + low_priority_batch_timeout_micros = _execute.make_int(low_priority_batch_timeout_micros, "low_priority_batch_timeout_micros") + if low_priority_allowed_batch_sizes is None: + low_priority_allowed_batch_sizes = [] + if not isinstance(low_priority_allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'low_priority_allowed_batch_sizes' argument to " + "'batch_function' Op, not %r." 
% low_priority_allowed_batch_sizes) + low_priority_allowed_batch_sizes = [_execute.make_int(_i, "low_priority_allowed_batch_sizes") for _i in low_priority_allowed_batch_sizes] + if low_priority_max_enqueued_batches is None: + low_priority_max_enqueued_batches = 0 + low_priority_max_enqueued_batches = _execute.make_int(low_priority_max_enqueued_batches, "low_priority_max_enqueued_batches") + if enable_large_batch_splitting is None: + enable_large_batch_splitting = False + enable_large_batch_splitting = _execute.make_bool(enable_large_batch_splitting, "enable_large_batch_splitting") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchFunction", in_tensors=in_tensors, + captured_tensors=captured_tensors, f=f, + num_batch_threads=num_batch_threads, + max_batch_size=max_batch_size, + batch_timeout_micros=batch_timeout_micros, Tout=Tout, + max_enqueued_batches=max_enqueued_batches, + allowed_batch_sizes=allowed_batch_sizes, + container=container, shared_name=shared_name, + batching_queue=batching_queue, + low_priority_max_batch_size=low_priority_max_batch_size, + low_priority_batch_timeout_micros=low_priority_batch_timeout_micros, + low_priority_allowed_batch_sizes=low_priority_allowed_batch_sizes, + low_priority_max_enqueued_batches=low_priority_max_enqueued_batches, + enable_large_batch_splitting=enable_large_batch_splitting, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "num_batch_threads", + _op._get_attr_int("num_batch_threads"), "max_batch_size", + _op._get_attr_int("max_batch_size"), "batch_timeout_micros", + _op._get_attr_int("batch_timeout_micros"), + "max_enqueued_batches", + _op._get_attr_int("max_enqueued_batches"), + "allowed_batch_sizes", _op.get_attr("allowed_batch_sizes"), + "container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name"), "batching_queue", + _op.get_attr("batching_queue"), "low_priority_max_batch_size", + 
_op._get_attr_int("low_priority_max_batch_size"), + "low_priority_batch_timeout_micros", + _op._get_attr_int("low_priority_batch_timeout_micros"), + "low_priority_allowed_batch_sizes", + _op.get_attr("low_priority_allowed_batch_sizes"), + "low_priority_max_enqueued_batches", + _op._get_attr_int("low_priority_max_enqueued_batches"), "Tin", + _op.get_attr("Tin"), "Tcaptured", _op.get_attr("Tcaptured"), + "Tout", _op.get_attr("Tout"), "enable_large_batch_splitting", + _op._get_attr_bool("enable_large_batch_splitting")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchFunction", _inputs_flat, _attrs, _result) + return _result + +BatchFunction = tf_export("raw_ops.BatchFunction")(_ops.to_raw_op(batch_function)) + + +def batch_function_eager_fallback(in_tensors, captured_tensors, f, num_batch_threads: int, max_batch_size: int, batch_timeout_micros: int, Tout, max_enqueued_batches: int, allowed_batch_sizes, container: str, shared_name: str, batching_queue: str, low_priority_max_batch_size: int, low_priority_batch_timeout_micros: int, low_priority_allowed_batch_sizes, low_priority_max_enqueued_batches: int, enable_large_batch_splitting: bool, name, ctx): + num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads") + max_batch_size = _execute.make_int(max_batch_size, "max_batch_size") + batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros") + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'batch_function' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + if max_enqueued_batches is None: + max_enqueued_batches = 10 + max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches") + if allowed_batch_sizes is None: + allowed_batch_sizes = [] + if not isinstance(allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'allowed_batch_sizes' argument to " + "'batch_function' Op, not %r." 
% allowed_batch_sizes) + allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes] + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if batching_queue is None: + batching_queue = "" + batching_queue = _execute.make_str(batching_queue, "batching_queue") + if low_priority_max_batch_size is None: + low_priority_max_batch_size = 0 + low_priority_max_batch_size = _execute.make_int(low_priority_max_batch_size, "low_priority_max_batch_size") + if low_priority_batch_timeout_micros is None: + low_priority_batch_timeout_micros = 0 + low_priority_batch_timeout_micros = _execute.make_int(low_priority_batch_timeout_micros, "low_priority_batch_timeout_micros") + if low_priority_allowed_batch_sizes is None: + low_priority_allowed_batch_sizes = [] + if not isinstance(low_priority_allowed_batch_sizes, (list, tuple)): + raise TypeError( + "Expected list for 'low_priority_allowed_batch_sizes' argument to " + "'batch_function' Op, not %r." 
% low_priority_allowed_batch_sizes) + low_priority_allowed_batch_sizes = [_execute.make_int(_i, "low_priority_allowed_batch_sizes") for _i in low_priority_allowed_batch_sizes] + if low_priority_max_enqueued_batches is None: + low_priority_max_enqueued_batches = 0 + low_priority_max_enqueued_batches = _execute.make_int(low_priority_max_enqueued_batches, "low_priority_max_enqueued_batches") + if enable_large_batch_splitting is None: + enable_large_batch_splitting = False + enable_large_batch_splitting = _execute.make_bool(enable_large_batch_splitting, "enable_large_batch_splitting") + _attr_Tin, in_tensors = _execute.convert_to_mixed_eager_tensors(in_tensors, ctx) + _attr_Tcaptured, captured_tensors = _execute.convert_to_mixed_eager_tensors(captured_tensors, ctx) + _inputs_flat = list(in_tensors) + list(captured_tensors) + _attrs = ("f", f, "num_batch_threads", num_batch_threads, "max_batch_size", + max_batch_size, "batch_timeout_micros", batch_timeout_micros, + "max_enqueued_batches", max_enqueued_batches, "allowed_batch_sizes", + allowed_batch_sizes, "container", container, "shared_name", shared_name, + "batching_queue", batching_queue, "low_priority_max_batch_size", + low_priority_max_batch_size, "low_priority_batch_timeout_micros", + low_priority_batch_timeout_micros, "low_priority_allowed_batch_sizes", + low_priority_allowed_batch_sizes, "low_priority_max_enqueued_batches", + low_priority_max_enqueued_batches, "Tin", _attr_Tin, "Tcaptured", + _attr_Tcaptured, "Tout", Tout, "enable_large_batch_splitting", + enable_large_batch_splitting) + _result = _execute.execute(b"BatchFunction", len(Tout), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchFunction", _inputs_flat, _attrs, _result) + return _result + + +TV_Unbatch_T = TypeVar("TV_Unbatch_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, 
_atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def unbatch(batched_tensor: Annotated[Any, TV_Unbatch_T], batch_index: Annotated[Any, _atypes.Int64], id: Annotated[Any, _atypes.Int64], timeout_micros: int, container:str="", shared_name:str="", name=None) -> Annotated[Any, TV_Unbatch_T]: + r"""Reverses the operation of Batch for a single output Tensor. + + An instance of Unbatch either receives an empty batched_tensor, in which case it + asynchronously waits until the values become available from a concurrently + running instance of Unbatch with the same container and shared_name, or receives + a non-empty batched_tensor in which case it finalizes all other concurrently + running instances and outputs its own element from the batch. + + batched_tensor: The possibly transformed output of Batch. The size of the first + dimension should remain unchanged by the transformations for the operation to + work. + batch_index: The matching batch_index obtained from Batch. + id: The id scalar emitted by Batch. + unbatched_tensor: The Tensor corresponding to this execution. + timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + batched input tensor associated with a given invocation of the op. + container: Container to control resource sharing. + shared_name: Instances of Unbatch with the same container and shared_name are + assumed to possibly belong to the same batch. If left empty, the op name will + be used as the shared name. + + Args: + batched_tensor: A `Tensor`. + batch_index: A `Tensor` of type `int64`. + id: A `Tensor` of type `int64`. + timeout_micros: An `int`. + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. 
Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `batched_tensor`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Unbatch", name, batched_tensor, batch_index, id, + "timeout_micros", timeout_micros, "container", container, + "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unbatch_eager_fallback( + batched_tensor, batch_index, id, timeout_micros=timeout_micros, + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + timeout_micros = _execute.make_int(timeout_micros, "timeout_micros") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Unbatch", batched_tensor=batched_tensor, batch_index=batch_index, + id=id, timeout_micros=timeout_micros, container=container, + shared_name=shared_name, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("timeout_micros", _op._get_attr_int("timeout_micros"), + "container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Unbatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Unbatch = tf_export("raw_ops.Unbatch")(_ops.to_raw_op(unbatch)) + + +def unbatch_eager_fallback(batched_tensor: Annotated[Any, TV_Unbatch_T], batch_index: Annotated[Any, _atypes.Int64], id: Annotated[Any, _atypes.Int64], timeout_micros: 
int, container: str, shared_name: str, name, ctx) -> Annotated[Any, TV_Unbatch_T]: + timeout_micros = _execute.make_int(timeout_micros, "timeout_micros") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _attr_T, (batched_tensor,) = _execute.args_to_matching_eager([batched_tensor], ctx, []) + batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64) + id = _ops.convert_to_tensor(id, _dtypes.int64) + _inputs_flat = [batched_tensor, batch_index, id] + _attrs = ("timeout_micros", timeout_micros, "container", container, + "shared_name", shared_name, "T", _attr_T) + _result = _execute.execute(b"Unbatch", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Unbatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnbatchGrad_T = TypeVar("TV_UnbatchGrad_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def unbatch_grad(original_input: Annotated[Any, TV_UnbatchGrad_T], batch_index: Annotated[Any, _atypes.Int64], grad: Annotated[Any, TV_UnbatchGrad_T], id: Annotated[Any, _atypes.Int64], container:str="", shared_name:str="", name=None) -> Annotated[Any, TV_UnbatchGrad_T]: + r"""Gradient of Unbatch. + + Acts like Batch but using the given batch_index index of batching things as they + become available. This ensures that the gradients are propagated back in the + same session which did the forward pass. 
+ + original_input: The input to the Unbatch operation this is the gradient of. + batch_index: The batch_index given to the Unbatch operation this is the gradient + of. + grad: The downstream gradient. + id: The id scalar emitted by Batch. + batched_grad: The return value, either an empty tensor or the batched gradient. + container: Container to control resource sharing. + shared_name: Instances of UnbatchGrad with the same container and shared_name + are assumed to possibly belong to the same batch. If left empty, the op name + will be used as the shared name. + + Args: + original_input: A `Tensor`. + batch_index: A `Tensor` of type `int64`. + grad: A `Tensor`. Must have the same type as `original_input`. + id: A `Tensor` of type `int64`. + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `original_input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnbatchGrad", name, original_input, batch_index, grad, id, + "container", container, "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unbatch_grad_eager_fallback( + original_input, batch_index, grad, id, container=container, + shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnbatchGrad", original_input=original_input, batch_index=batch_index, + grad=grad, id=id, container=container, + shared_name=shared_name, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnbatchGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnbatchGrad = tf_export("raw_ops.UnbatchGrad")(_ops.to_raw_op(unbatch_grad)) + + +def unbatch_grad_eager_fallback(original_input: Annotated[Any, TV_UnbatchGrad_T], batch_index: Annotated[Any, _atypes.Int64], grad: Annotated[Any, TV_UnbatchGrad_T], id: Annotated[Any, _atypes.Int64], container: str, shared_name: str, name, ctx) -> Annotated[Any, TV_UnbatchGrad_T]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _attr_T, _inputs_T = _execute.args_to_matching_eager([original_input, grad], ctx, []) + (original_input, grad) = _inputs_T + batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64) + id = _ops.convert_to_tensor(id, _dtypes.int64) + _inputs_flat = [original_input, batch_index, grad, id] + _attrs = ("container", container, "shared_name", shared_name, "T", _attr_T) + _result = _execute.execute(b"UnbatchGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnbatchGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_bitwise_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_bitwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7d68681f43d638850d1fc7c4a5a6e4e3d8fb796e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_bitwise_ops.py @@ -0,0 +1,765 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_BitwiseAnd_T = TypeVar("TV_BitwiseAnd_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.bitwise_and') +def bitwise_and(x: Annotated[Any, TV_BitwiseAnd_T], y: Annotated[Any, TV_BitwiseAnd_T], name=None) -> Annotated[Any, TV_BitwiseAnd_T]: + r"""Elementwise computes the bitwise AND of `x` and `y`. + + The result will have those bits set, that are set in both `x` and `y`. The + computation is performed on the underlying representations of `x` and `y`. 
+ + For example: + + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + tf.uint8, tf.uint16, tf.uint32, tf.uint64] + + for dtype in dtype_list: + lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + + res = bitwise_ops.bitwise_and(lhs, rhs) + tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BitwiseAnd", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_bitwise_and( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return bitwise_and_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_and, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_bitwise_and( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BitwiseAnd", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_and, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BitwiseAnd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BitwiseAnd = tf_export("raw_ops.BitwiseAnd")(_ops.to_raw_op(bitwise_and)) +_dispatcher_for_bitwise_and = bitwise_and._tf_type_based_dispatcher.Dispatch + + +def bitwise_and_eager_fallback(x: Annotated[Any, TV_BitwiseAnd_T], y: Annotated[Any, TV_BitwiseAnd_T], name, ctx) -> Annotated[Any, TV_BitwiseAnd_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BitwiseAnd", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BitwiseAnd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BitwiseOr_T = TypeVar("TV_BitwiseOr_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.bitwise_or') +def bitwise_or(x: Annotated[Any, TV_BitwiseOr_T], y: Annotated[Any, TV_BitwiseOr_T], name=None) -> Annotated[Any, TV_BitwiseOr_T]: + r"""Elementwise computes the bitwise OR of `x` and `y`. + + The result will have those bits set, that are set in `x`, `y` or both. 
The + computation is performed on the underlying representations of `x` and `y`. + + For example: + + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + tf.uint8, tf.uint16, tf.uint32, tf.uint64] + + for dtype in dtype_list: + lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + + res = bitwise_ops.bitwise_or(lhs, rhs) + tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BitwiseOr", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_bitwise_or( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return bitwise_or_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_or, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_bitwise_or( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BitwiseOr", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_or, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BitwiseOr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BitwiseOr = tf_export("raw_ops.BitwiseOr")(_ops.to_raw_op(bitwise_or)) +_dispatcher_for_bitwise_or = bitwise_or._tf_type_based_dispatcher.Dispatch + + +def bitwise_or_eager_fallback(x: Annotated[Any, TV_BitwiseOr_T], y: Annotated[Any, TV_BitwiseOr_T], name, ctx) -> Annotated[Any, TV_BitwiseOr_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BitwiseOr", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BitwiseOr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BitwiseXor_T = TypeVar("TV_BitwiseXor_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.bitwise_xor') +def bitwise_xor(x: Annotated[Any, TV_BitwiseXor_T], y: Annotated[Any, TV_BitwiseXor_T], name=None) -> Annotated[Any, TV_BitwiseXor_T]: + r"""Elementwise computes the bitwise XOR of `x` and `y`. + + The result will have those bits set, that are different in `x` and `y`. 
The + computation is performed on the underlying representations of `x` and `y`. + + For example: + + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + tf.uint8, tf.uint16, tf.uint32, tf.uint64] + + for dtype in dtype_list: + lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + + res = bitwise_ops.bitwise_xor(lhs, rhs) + tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BitwiseXor", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_bitwise_xor( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return bitwise_xor_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_xor, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_bitwise_xor( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BitwiseXor", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + bitwise_xor, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BitwiseXor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BitwiseXor = tf_export("raw_ops.BitwiseXor")(_ops.to_raw_op(bitwise_xor)) +_dispatcher_for_bitwise_xor = bitwise_xor._tf_type_based_dispatcher.Dispatch + + +def bitwise_xor_eager_fallback(x: Annotated[Any, TV_BitwiseXor_T], y: Annotated[Any, TV_BitwiseXor_T], name, ctx) -> Annotated[Any, TV_BitwiseXor_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BitwiseXor", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BitwiseXor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Invert_T = TypeVar("TV_Invert_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.invert') +def invert(x: Annotated[Any, TV_Invert_T], name=None) -> Annotated[Any, TV_Invert_T]: + r"""Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010. + + Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101. 
+ This operation is performed on each element of the tensor argument `x`. + + Example: + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + + # flip 2 (00000010) to -3 (11111101) + tf.assert_equal(-3, bitwise_ops.invert(2)) + + dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] + + inputs = [0, 5, 3, 14] + for dtype in dtype_list: + # Because of issues with negative numbers, let's test this indirectly. + # 1. invert(a) and a = 0 + # 2. invert(a) or a = invert(0) + input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + input_tensor, bitwise_ops.invert(input_tensor)), + bitwise_ops.bitwise_or( + input_tensor, bitwise_ops.invert(input_tensor)), + bitwise_ops.invert( + tf.constant(0, dtype=dtype))] + + expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) + + expected = tf.cast([not_0] * 4, tf.float32) + tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) + + # For unsigned dtypes let's also check the result directly. + if dtype.is_unsigned: + inverted = bitwise_ops.invert(input_tensor) + expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Invert", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_invert( + (x, name,), None) + if _result is not NotImplemented: + return _result + return invert_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + invert, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_invert( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Invert", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + invert, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Invert", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Invert = tf_export("raw_ops.Invert")(_ops.to_raw_op(invert)) +_dispatcher_for_invert = invert._tf_type_based_dispatcher.Dispatch + + +def invert_eager_fallback(x: Annotated[Any, TV_Invert_T], name, ctx) -> Annotated[Any, TV_Invert_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Invert", 1, inputs=_inputs_flat, attrs=_attrs, + 
ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Invert", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_LeftShift_T = TypeVar("TV_LeftShift_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.left_shift') +def left_shift(x: Annotated[Any, TV_LeftShift_T], y: Annotated[Any, TV_LeftShift_T], name=None) -> Annotated[Any, TV_LeftShift_T]: + r"""Elementwise computes the bitwise left-shift of `x` and `y`. + + If `y` is negative, or greater than or equal to the width of `x` in bits the + result is implementation defined. + + Example: + + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + import numpy as np + dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + + for dtype in dtype_list: + lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + + left_shift_result = bitwise_ops.left_shift(lhs, rhs) + + print(left_shift_result) + + # This will print: + # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + + lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + bitwise_ops.left_shift(lhs, rhs) + # + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LeftShift", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_left_shift( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return left_shift_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + left_shift, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_left_shift( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LeftShift", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + left_shift, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LeftShift", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LeftShift = tf_export("raw_ops.LeftShift")(_ops.to_raw_op(left_shift)) +_dispatcher_for_left_shift = left_shift._tf_type_based_dispatcher.Dispatch + + +def left_shift_eager_fallback(x: Annotated[Any, TV_LeftShift_T], y: Annotated[Any, TV_LeftShift_T], name, ctx) -> Annotated[Any, TV_LeftShift_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 
+ (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"LeftShift", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LeftShift", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_PopulationCount_T = TypeVar("TV_PopulationCount_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def population_count(x: Annotated[Any, TV_PopulationCount_T], name=None) -> Annotated[Any, _atypes.UInt8]: + r"""Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). + + For each entry in `x`, calculates the number of `1` (on) bits in the binary + representation of that entry. + + **NOTE**: It is more efficient to first `tf.bitcast` your tensors into + `int32` or `int64` and perform the bitcount on the result, than to feed in + 8- or 16-bit inputs and then aggregate the resulting counts. + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `uint8`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "PopulationCount", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return population_count_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "PopulationCount", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "PopulationCount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +PopulationCount = tf_export("raw_ops.PopulationCount")(_ops.to_raw_op(population_count)) + + +def population_count_eager_fallback(x: Annotated[Any, TV_PopulationCount_T], name, ctx) -> Annotated[Any, _atypes.UInt8]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"PopulationCount", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "PopulationCount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RightShift_T = TypeVar("TV_RightShift_T", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('bitwise.right_shift') +def right_shift(x: Annotated[Any, TV_RightShift_T], y: Annotated[Any, TV_RightShift_T], name=None) -> Annotated[Any, TV_RightShift_T]: + r"""Elementwise computes the bitwise right-shift of `x` and `y`. + + Performs a logical shift for unsigned integer types, and an arithmetic shift + for signed integer types. + + If `y` is negative, or greater than or equal to than the width of `x` in bits + the result is implementation defined. 
+ + Example: + + ```python + import tensorflow as tf + from tensorflow.python.ops import bitwise_ops + import numpy as np + dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + + for dtype in dtype_list: + lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + + right_shift_result = bitwise_ops.right_shift(lhs, rhs) + + print(right_shift_result) + + # This will print: + # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) + # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) + # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) + # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) + + lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + bitwise_ops.right_shift(lhs, rhs) + # + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RightShift", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_right_shift( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return right_shift_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + right_shift, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_right_shift( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RightShift", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + right_shift, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RightShift", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RightShift = tf_export("raw_ops.RightShift")(_ops.to_raw_op(right_shift)) +_dispatcher_for_right_shift = right_shift._tf_type_based_dispatcher.Dispatch + + +def right_shift_eager_fallback(x: Annotated[Any, TV_RightShift_T], y: Annotated[Any, TV_RightShift_T], name, ctx) -> Annotated[Any, TV_RightShift_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RightShift", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RightShift", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_collective_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_collective_ops.py new file mode 100644 index 
0000000000000000000000000000000000000000..47dab58eb6178b1a5823723883a7fc4335d4958c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_collective_ops.py @@ -0,0 +1,1452 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_CollectiveAllToAllV2_T = TypeVar("TV_CollectiveAllToAllV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_all_to_all_v2(input: Annotated[Any, TV_CollectiveAllToAllV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, name=None) -> Annotated[Any, TV_CollectiveAllToAllV2_T]: + r"""Mutually exchanges multiple tensors of identical type and shape. + + `is_stateless` means each op does not need control dependencies to other + collective ops. In this case, keys that are unique at runtime + (e.g. 
`instance_key`) should be used to distinguish collective groups. + + Args: + input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + ordering_token: A list of `Tensor` objects with type `resource`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + is_stateless: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveAllToAllV2", name, input, group_size, group_key, + instance_key, ordering_token, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds, + "is_stateless", is_stateless) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_all_to_all_v2_eager_fallback( + input, group_size, group_key, instance_key, ordering_token, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, is_stateless=is_stateless, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_all_to_all_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveAllToAllV2", input=input, group_size=group_size, + group_key=group_key, + instance_key=instance_key, + ordering_token=ordering_token, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, + is_stateless=is_stateless, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds"), "is_stateless", + _op._get_attr_bool("is_stateless"), "Nordering_token", + _op._get_attr_int("Nordering_token")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveAllToAllV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveAllToAllV2 = tf_export("raw_ops.CollectiveAllToAllV2")(_ops.to_raw_op(collective_all_to_all_v2)) + + +def collective_all_to_all_v2_eager_fallback(input: Annotated[Any, TV_CollectiveAllToAllV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint: str, timeout_seconds: float, is_stateless: bool, name, ctx) -> Annotated[Any, TV_CollectiveAllToAllV2_T]: + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_all_to_all_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource) + _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token) + _attrs = ("T", _attr_T, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds, "is_stateless", is_stateless, + "Nordering_token", _attr_Nordering_token) + _result = _execute.execute(b"CollectiveAllToAllV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveAllToAllV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveAllToAllV3_T = TypeVar("TV_CollectiveAllToAllV3_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_all_to_all_v3(input: Annotated[Any, TV_CollectiveAllToAllV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveAllToAllV3_T]: + r"""Mutually exchanges multiple tensors of identical type and shape. + + Args: + input: A `Tensor`. 
Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + communicator: A `Tensor` of type `resource`. + group_assignment: A `Tensor` of type `int32`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveAllToAllV3", name, input, communicator, + group_assignment, "timeout_seconds", timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_all_to_all_v3_eager_fallback( + input, communicator, group_assignment, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveAllToAllV3", input=input, communicator=communicator, + group_assignment=group_assignment, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveAllToAllV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveAllToAllV3 = tf_export("raw_ops.CollectiveAllToAllV3")(_ops.to_raw_op(collective_all_to_all_v3)) + + +def collective_all_to_all_v3_eager_fallback(input: Annotated[Any, TV_CollectiveAllToAllV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveAllToAllV3_T]: + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + communicator = _ops.convert_to_tensor(communicator, _dtypes.resource) + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + _inputs_flat = [input, communicator, group_assignment] + _attrs = ("T", _attr_T, "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveAllToAllV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveAllToAllV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_CollectiveAssignGroupV2Output = collections.namedtuple( + "CollectiveAssignGroupV2", + ["group_size", "group_key"]) + + +def 
collective_assign_group_v2(group_assignment: Annotated[Any, _atypes.Int32], device_index: Annotated[Any, _atypes.Int32], base_key: Annotated[Any, _atypes.Int32], name=None): + r"""Assign group keys based on group assignment. + + Args: + group_assignment: A `Tensor` of type `int32`. + device_index: A `Tensor` of type `int32`. + base_key: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (group_size, group_key). + + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveAssignGroupV2", name, group_assignment, device_index, + base_key) + _result = _CollectiveAssignGroupV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_assign_group_v2_eager_fallback( + group_assignment, device_index, base_key, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveAssignGroupV2", group_assignment=group_assignment, + device_index=device_index, + base_key=base_key, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveAssignGroupV2", _inputs_flat, _attrs, _result) + _result = _CollectiveAssignGroupV2Output._make(_result) + return _result + +CollectiveAssignGroupV2 = tf_export("raw_ops.CollectiveAssignGroupV2")(_ops.to_raw_op(collective_assign_group_v2)) + + +def collective_assign_group_v2_eager_fallback(group_assignment: Annotated[Any, _atypes.Int32], device_index: Annotated[Any, _atypes.Int32], base_key: Annotated[Any, _atypes.Int32], name, ctx): + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + device_index = _ops.convert_to_tensor(device_index, _dtypes.int32) + base_key = _ops.convert_to_tensor(base_key, _dtypes.int32) + _inputs_flat = [group_assignment, device_index, base_key] + _attrs = None + _result = _execute.execute(b"CollectiveAssignGroupV2", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveAssignGroupV2", _inputs_flat, _attrs, _result) + _result = _CollectiveAssignGroupV2Output._make(_result) + return _result + + +TV_CollectiveBcastRecv_T = TypeVar("TV_CollectiveBcastRecv_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_bcast_recv(T: TV_CollectiveBcastRecv_T, group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastRecv_T]: + r"""Receives a tensor value broadcast from another device. + + Args: + T: A `tf.DType` from: `tf.bool, tf.float32, tf.half, tf.float64, tf.int32, tf.int64`. + group_size: An `int`. + group_key: An `int`. + instance_key: An `int`. 
+ shape: A `tf.TensorShape` or list of `ints`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `T`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveBcastRecv", name, "T", T, "group_size", group_size, + "group_key", group_key, "instance_key", instance_key, "shape", shape, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_bcast_recv_eager_fallback( + T=T, group_size=group_size, group_key=group_key, + instance_key=instance_key, shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ T = _execute.make_type(T, "T") + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveBcastRecv", T=T, group_size=group_size, + group_key=group_key, instance_key=instance_key, + shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "group_size", + _op._get_attr_int("group_size"), "group_key", + _op._get_attr_int("group_key"), "instance_key", + _op._get_attr_int("instance_key"), "shape", + _op.get_attr("shape"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveBcastRecv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveBcastRecv = tf_export("raw_ops.CollectiveBcastRecv")(_ops.to_raw_op(collective_bcast_recv)) + + +def collective_bcast_recv_eager_fallback(T: TV_CollectiveBcastRecv_T, group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastRecv_T]: + T = _execute.make_type(T, "T") + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + 
communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _inputs_flat = [] + _attrs = ("T", T, "group_size", group_size, "group_key", group_key, + "instance_key", instance_key, "shape", shape, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveBcastRecv", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveBcastRecv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveBcastRecvV2_T = TypeVar("TV_CollectiveBcastRecvV2_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) +TV_CollectiveBcastRecvV2_Tshape = TypeVar("TV_CollectiveBcastRecvV2_Tshape", _atypes.Int32, _atypes.Int64) + +def collective_bcast_recv_v2(group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], shape: Annotated[Any, TV_CollectiveBcastRecvV2_Tshape], T: TV_CollectiveBcastRecvV2_T, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastRecvV2_T]: + r"""Receives a tensor value broadcast from another device. + + Args: + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + T: A `tf.DType` from: `tf.bool, tf.float32, tf.half, tf.float64, tf.int32, tf.int64`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `T`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveBcastRecvV2", name, group_size, group_key, + instance_key, shape, "T", T, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_bcast_recv_v2_eager_fallback( + group_size, group_key, instance_key, shape, T=T, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + T = _execute.make_type(T, "T") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveBcastRecvV2", group_size=group_size, group_key=group_key, + instance_key=instance_key, shape=shape, T=T, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tshape", + _op._get_attr_type("Tshape"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveBcastRecvV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveBcastRecvV2 = tf_export("raw_ops.CollectiveBcastRecvV2")(_ops.to_raw_op(collective_bcast_recv_v2)) + + +def collective_bcast_recv_v2_eager_fallback(group_size: Annotated[Any, _atypes.Int32], group_key: 
Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], shape: Annotated[Any, TV_CollectiveBcastRecvV2_Tshape], T: TV_CollectiveBcastRecvV2_T, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastRecvV2_T]: + T = _execute.make_type(T, "T") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + _inputs_flat = [group_size, group_key, instance_key, shape] + _attrs = ("T", T, "Tshape", _attr_Tshape, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveBcastRecvV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveBcastRecvV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveBcastSend_T = TypeVar("TV_CollectiveBcastSend_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_bcast_send(input: Annotated[Any, TV_CollectiveBcastSend_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastSend_T]: + r"""Broadcasts a tensor value to one or more other devices. + + Args: + input: A `Tensor`. Must be one of the following types: `bool`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: An `int`. + group_key: An `int`. 
+ instance_key: An `int`. + shape: A `tf.TensorShape` or list of `ints`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveBcastSend", name, input, "group_size", group_size, + "group_key", group_key, "instance_key", instance_key, "shape", shape, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_bcast_send_eager_fallback( + input, group_size=group_size, group_key=group_key, + instance_key=instance_key, shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveBcastSend", input=input, group_size=group_size, + group_key=group_key, instance_key=instance_key, + shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "group_size", + _op._get_attr_int("group_size"), "group_key", + _op._get_attr_int("group_key"), "instance_key", + _op._get_attr_int("instance_key"), "shape", + _op.get_attr("shape"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveBcastSend", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveBcastSend = tf_export("raw_ops.CollectiveBcastSend")(_ops.to_raw_op(collective_bcast_send)) + + +def collective_bcast_send_eager_fallback(input: Annotated[Any, TV_CollectiveBcastSend_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastSend_T]: + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + communication_hint = "auto" + communication_hint = 
_execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bool, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key, + "instance_key", instance_key, "shape", shape, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveBcastSend", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveBcastSend", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveBcastSendV2_T = TypeVar("TV_CollectiveBcastSendV2_T", _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_bcast_send_v2(input: Annotated[Any, TV_CollectiveBcastSendV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveBcastSendV2_T]: + r"""Broadcasts a tensor value to one or more other devices. + + Args: + input: A `Tensor`. Must be one of the following types: `bool`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveBcastSendV2", name, input, group_size, group_key, + instance_key, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_bcast_send_v2_eager_fallback( + input, group_size, group_key, instance_key, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveBcastSendV2", input=input, group_size=group_size, + group_key=group_key, + instance_key=instance_key, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveBcastSendV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveBcastSendV2 = tf_export("raw_ops.CollectiveBcastSendV2")(_ops.to_raw_op(collective_bcast_send_v2)) + + +def collective_bcast_send_v2_eager_fallback(input: Annotated[Any, TV_CollectiveBcastSendV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], 
instance_key: Annotated[Any, _atypes.Int32], communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveBcastSendV2_T]: + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bool, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + _inputs_flat = [input, group_size, group_key, instance_key] + _attrs = ("T", _attr_T, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveBcastSendV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveBcastSendV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveGather_T = TypeVar("TV_CollectiveGather_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_gather(input: Annotated[Any, TV_CollectiveGather_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveGather_T]: + r"""Mutually accumulates multiple tensors of identical type and shape. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`. + group_size: An `int`. + group_key: An `int`. + instance_key: An `int`. + shape: A `tf.TensorShape` or list of `ints`. + communication_hint: An optional `string`. Defaults to `"auto"`. 
+ timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveGather", name, input, "group_size", group_size, + "group_key", group_key, "instance_key", instance_key, "shape", shape, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_gather_eager_fallback( + input, group_size=group_size, group_key=group_key, + instance_key=instance_key, shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveGather", input=input, group_size=group_size, + group_key=group_key, instance_key=instance_key, + shape=shape, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "group_size", + _op._get_attr_int("group_size"), "group_key", + _op._get_attr_int("group_key"), "instance_key", + _op._get_attr_int("instance_key"), "shape", + _op.get_attr("shape"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveGather = tf_export("raw_ops.CollectiveGather")(_ops.to_raw_op(collective_gather)) + + +def collective_gather_eager_fallback(input: Annotated[Any, TV_CollectiveGather_T], group_size: int, group_key: int, instance_key: int, shape, communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveGather_T]: + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + shape = _execute.make_shape(shape, "shape") + if communication_hint is None: + communication_hint = "auto" + communication_hint = 
_execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key, + "instance_key", instance_key, "shape", shape, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds) + _result = _execute.execute(b"CollectiveGather", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveGatherV2_T = TypeVar("TV_CollectiveGatherV2_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_gather_v2(input: Annotated[Any, TV_CollectiveGatherV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, name=None) -> Annotated[Any, TV_CollectiveGatherV2_T]: + r"""Mutually accumulates multiple tensors of identical type and shape. + + `is_stateless` means each op does not need control dependencies to other + collective ops. In this case, keys that are unique at runtime + (e.g. `instance_key`) should be used to distinguish collective groups. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`. + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + ordering_token: A list of `Tensor` objects with type `resource`. 
+ communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + is_stateless: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveGatherV2", name, input, group_size, group_key, + instance_key, ordering_token, "communication_hint", + communication_hint, "timeout_seconds", timeout_seconds, + "is_stateless", is_stateless) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_gather_v2_eager_fallback( + input, group_size, group_key, instance_key, ordering_token, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, is_stateless=is_stateless, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_gather_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveGatherV2", input=input, group_size=group_size, + group_key=group_key, instance_key=instance_key, + ordering_token=ordering_token, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, + is_stateless=is_stateless, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds"), "is_stateless", + _op._get_attr_bool("is_stateless"), "Nordering_token", + _op._get_attr_int("Nordering_token")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveGatherV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveGatherV2 = tf_export("raw_ops.CollectiveGatherV2")(_ops.to_raw_op(collective_gather_v2)) + + +def collective_gather_v2_eager_fallback(input: Annotated[Any, TV_CollectiveGatherV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], communication_hint: str, timeout_seconds: float, is_stateless: bool, name, ctx) -> Annotated[Any, TV_CollectiveGatherV2_T]: + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_gather_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource) + _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token) + _attrs = ("T", _attr_T, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds, "is_stateless", is_stateless, + "Nordering_token", _attr_Nordering_token) + _result = _execute.execute(b"CollectiveGatherV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveGatherV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def collective_initialize_communicator(group_key: Annotated[Any, _atypes.Int32], rank: Annotated[Any, _atypes.Int32], group_size: Annotated[Any, _atypes.Int32], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, _atypes.Resource]: + r"""Initializes a group for collective operations. + + Args: + group_key: A `Tensor` of type `int32`. + rank: A `Tensor` of type `int32`. + group_size: A `Tensor` of type `int32`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveInitializeCommunicator", name, group_key, rank, + group_size, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_initialize_communicator_eager_fallback( + group_key, rank, group_size, communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveInitializeCommunicator", group_key=group_key, rank=rank, + group_size=group_size, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("communication_hint", _op.get_attr("communication_hint"), + "timeout_seconds", _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveInitializeCommunicator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveInitializeCommunicator = tf_export("raw_ops.CollectiveInitializeCommunicator")(_ops.to_raw_op(collective_initialize_communicator)) + + +def collective_initialize_communicator_eager_fallback(group_key: Annotated[Any, _atypes.Int32], rank: Annotated[Any, 
_atypes.Int32], group_size: Annotated[Any, _atypes.Int32], communication_hint: str, timeout_seconds: float, name, ctx) -> Annotated[Any, _atypes.Resource]: + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + rank = _ops.convert_to_tensor(rank, _dtypes.int32) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + _inputs_flat = [group_key, rank, group_size] + _attrs = ("communication_hint", communication_hint, "timeout_seconds", + timeout_seconds) + _result = _execute.execute(b"CollectiveInitializeCommunicator", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveInitializeCommunicator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveReduce_T = TypeVar("TV_CollectiveReduce_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_reduce(input: Annotated[Any, TV_CollectiveReduce_T], group_size: int, group_key: int, instance_key: int, merge_op: str, final_op: str, subdiv_offsets, wait_for=[], communication_hint:str="auto", timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveReduce_T]: + r"""Mutually reduces multiple tensors of identical type and shape. + + Args: + input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: An `int`. + group_key: An `int`. + instance_key: An `int`. + merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`. + final_op: A `string` from: `"Id", "Div"`. + subdiv_offsets: A list of `ints`. + wait_for: An optional list of `ints`. Defaults to `[]`. 
+ communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveReduce", name, input, "group_size", group_size, + "group_key", group_key, "instance_key", instance_key, "merge_op", + merge_op, "final_op", final_op, "subdiv_offsets", subdiv_offsets, + "wait_for", wait_for, "communication_hint", communication_hint, + "timeout_seconds", timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_reduce_eager_fallback( + input, group_size=group_size, group_key=group_key, + instance_key=instance_key, merge_op=merge_op, final_op=final_op, + subdiv_offsets=subdiv_offsets, wait_for=wait_for, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if not isinstance(subdiv_offsets, (list, tuple)): + raise TypeError( + "Expected list for 'subdiv_offsets' argument to " + "'collective_reduce' Op, not %r." 
% subdiv_offsets) + subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets] + if wait_for is None: + wait_for = [] + if not isinstance(wait_for, (list, tuple)): + raise TypeError( + "Expected list for 'wait_for' argument to " + "'collective_reduce' Op, not %r." % wait_for) + wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for] + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveReduce", input=input, group_size=group_size, + group_key=group_key, instance_key=instance_key, + merge_op=merge_op, final_op=final_op, + subdiv_offsets=subdiv_offsets, wait_for=wait_for, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "group_size", + _op._get_attr_int("group_size"), "group_key", + _op._get_attr_int("group_key"), "instance_key", + _op._get_attr_int("instance_key"), "merge_op", + _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"), + "subdiv_offsets", _op.get_attr("subdiv_offsets"), "wait_for", + _op.get_attr("wait_for"), "communication_hint", + _op.get_attr("communication_hint"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveReduce", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveReduce = tf_export("raw_ops.CollectiveReduce")(_ops.to_raw_op(collective_reduce)) + + +def collective_reduce_eager_fallback(input: Annotated[Any, TV_CollectiveReduce_T], group_size: int, group_key: int, instance_key: int, merge_op: str, final_op: str, subdiv_offsets, wait_for, communication_hint: str, 
timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveReduce_T]: + group_size = _execute.make_int(group_size, "group_size") + group_key = _execute.make_int(group_key, "group_key") + instance_key = _execute.make_int(instance_key, "instance_key") + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if not isinstance(subdiv_offsets, (list, tuple)): + raise TypeError( + "Expected list for 'subdiv_offsets' argument to " + "'collective_reduce' Op, not %r." % subdiv_offsets) + subdiv_offsets = [_execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets] + if wait_for is None: + wait_for = [] + if not isinstance(wait_for, (list, tuple)): + raise TypeError( + "Expected list for 'wait_for' argument to " + "'collective_reduce' Op, not %r." % wait_for) + wait_for = [_execute.make_int(_i, "wait_for") for _i in wait_for] + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "group_size", group_size, "group_key", group_key, + "instance_key", instance_key, "merge_op", merge_op, "final_op", final_op, + "subdiv_offsets", subdiv_offsets, "wait_for", wait_for, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds) + _result = _execute.execute(b"CollectiveReduce", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveReduce", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveReduceScatterV2_T = TypeVar("TV_CollectiveReduceScatterV2_T", 
_atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_reduce_scatter_v2(input: Annotated[Any, TV_CollectiveReduceScatterV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, max_subdivs_per_device:int=-1, name=None) -> Annotated[Any, TV_CollectiveReduceScatterV2_T]: + r"""Mutually reduces multiple tensors of identical type and shape and scatters the result. + + `is_stateless` means each op does not need control dependencies to other + collective ops. In this case, keys that are unique at runtime + (e.g. `instance_key`) should be used to distinguish collective groups. + + Args: + input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + ordering_token: A list of `Tensor` objects with type `resource`. + merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`. + final_op: A `string` from: `"Id", "Div"`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + is_stateless: An optional `bool`. Defaults to `False`. + max_subdivs_per_device: An optional `int`. Defaults to `-1`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveReduceScatterV2", name, input, group_size, group_key, + instance_key, ordering_token, "merge_op", merge_op, "final_op", + final_op, "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds, "is_stateless", is_stateless, + "max_subdivs_per_device", max_subdivs_per_device) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_reduce_scatter_v2_eager_fallback( + input, group_size, group_key, instance_key, ordering_token, + merge_op=merge_op, final_op=final_op, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, is_stateless=is_stateless, + max_subdivs_per_device=max_subdivs_per_device, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_reduce_scatter_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + if max_subdivs_per_device is None: + max_subdivs_per_device = -1 + max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveReduceScatterV2", input=input, group_size=group_size, + group_key=group_key, + instance_key=instance_key, + ordering_token=ordering_token, + merge_op=merge_op, final_op=final_op, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, + is_stateless=is_stateless, + max_subdivs_per_device=max_subdivs_per_device, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "merge_op", + _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"), + "communication_hint", _op.get_attr("communication_hint"), + "timeout_seconds", _op.get_attr("timeout_seconds"), + "is_stateless", _op._get_attr_bool("is_stateless"), + "Nordering_token", _op._get_attr_int("Nordering_token"), + "max_subdivs_per_device", + _op._get_attr_int("max_subdivs_per_device")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveReduceScatterV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveReduceScatterV2 = tf_export("raw_ops.CollectiveReduceScatterV2")(_ops.to_raw_op(collective_reduce_scatter_v2)) + + +def collective_reduce_scatter_v2_eager_fallback(input: Annotated[Any, 
TV_CollectiveReduceScatterV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint: str, timeout_seconds: float, is_stateless: bool, max_subdivs_per_device: int, name, ctx) -> Annotated[Any, TV_CollectiveReduceScatterV2_T]: + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_reduce_scatter_v2' Op, not %r." % ordering_token) + _attr_Nordering_token = len(ordering_token) + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + if max_subdivs_per_device is None: + max_subdivs_per_device = -1 + max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource) + _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token) + _attrs = ("T", _attr_T, "merge_op", merge_op, "final_op", final_op, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds, "is_stateless", 
is_stateless, "Nordering_token", + _attr_Nordering_token, "max_subdivs_per_device", max_subdivs_per_device) + _result = _execute.execute(b"CollectiveReduceScatterV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveReduceScatterV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveReduceV2_T = TypeVar("TV_CollectiveReduceV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_reduce_v2(input: Annotated[Any, TV_CollectiveReduceV2_T], group_size: Annotated[Any, _atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint:str="auto", timeout_seconds:float=0, is_stateless:bool=False, max_subdivs_per_device:int=-1, name=None) -> Annotated[Any, TV_CollectiveReduceV2_T]: + r"""Mutually reduces multiple tensors of identical type and shape. + + `is_stateless` means each op does not need control dependencies to other + collective ops. In this case, keys that are unique at runtime + (e.g. `instance_key`) should be used to distinguish collective groups. + + Args: + input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + group_size: A `Tensor` of type `int32`. + group_key: A `Tensor` of type `int32`. + instance_key: A `Tensor` of type `int32`. + ordering_token: A list of `Tensor` objects with type `resource`. + merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`. + final_op: A `string` from: `"Id", "Div"`. + communication_hint: An optional `string`. Defaults to `"auto"`. + timeout_seconds: An optional `float`. Defaults to `0`. + is_stateless: An optional `bool`. Defaults to `False`. + max_subdivs_per_device: An optional `int`. Defaults to `-1`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveReduceV2", name, input, group_size, group_key, + instance_key, ordering_token, "merge_op", merge_op, "final_op", + final_op, "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds, "is_stateless", is_stateless, + "max_subdivs_per_device", max_subdivs_per_device) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_reduce_v2_eager_fallback( + input, group_size, group_key, instance_key, ordering_token, + merge_op=merge_op, final_op=final_op, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, is_stateless=is_stateless, + max_subdivs_per_device=max_subdivs_per_device, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_reduce_v2' Op, not %r." 
% ordering_token) + _attr_Nordering_token = len(ordering_token) + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + if max_subdivs_per_device is None: + max_subdivs_per_device = -1 + max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveReduceV2", input=input, group_size=group_size, + group_key=group_key, instance_key=instance_key, + ordering_token=ordering_token, + merge_op=merge_op, final_op=final_op, + communication_hint=communication_hint, + timeout_seconds=timeout_seconds, + is_stateless=is_stateless, + max_subdivs_per_device=max_subdivs_per_device, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "merge_op", + _op.get_attr("merge_op"), "final_op", _op.get_attr("final_op"), + "communication_hint", _op.get_attr("communication_hint"), + "timeout_seconds", _op.get_attr("timeout_seconds"), + "is_stateless", _op._get_attr_bool("is_stateless"), + "Nordering_token", _op._get_attr_int("Nordering_token"), + "max_subdivs_per_device", + _op._get_attr_int("max_subdivs_per_device")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveReduceV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveReduceV2 = tf_export("raw_ops.CollectiveReduceV2")(_ops.to_raw_op(collective_reduce_v2)) + + +def collective_reduce_v2_eager_fallback(input: Annotated[Any, TV_CollectiveReduceV2_T], group_size: Annotated[Any, 
_atypes.Int32], group_key: Annotated[Any, _atypes.Int32], instance_key: Annotated[Any, _atypes.Int32], ordering_token: Annotated[List[Any], _atypes.Resource], merge_op: str, final_op: str, communication_hint: str, timeout_seconds: float, is_stateless: bool, max_subdivs_per_device: int, name, ctx) -> Annotated[Any, TV_CollectiveReduceV2_T]: + if not isinstance(ordering_token, (list, tuple)): + raise TypeError( + "Expected list for 'ordering_token' argument to " + "'collective_reduce_v2' Op, not %r." % ordering_token) + _attr_Nordering_token = len(ordering_token) + merge_op = _execute.make_str(merge_op, "merge_op") + final_op = _execute.make_str(final_op, "final_op") + if communication_hint is None: + communication_hint = "auto" + communication_hint = _execute.make_str(communication_hint, "communication_hint") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + if is_stateless is None: + is_stateless = False + is_stateless = _execute.make_bool(is_stateless, "is_stateless") + if max_subdivs_per_device is None: + max_subdivs_per_device = -1 + max_subdivs_per_device = _execute.make_int(max_subdivs_per_device, "max_subdivs_per_device") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + group_size = _ops.convert_to_tensor(group_size, _dtypes.int32) + group_key = _ops.convert_to_tensor(group_key, _dtypes.int32) + instance_key = _ops.convert_to_tensor(instance_key, _dtypes.int32) + ordering_token = _ops.convert_n_to_tensor(ordering_token, _dtypes.resource) + _inputs_flat = [input, group_size, group_key, instance_key] + list(ordering_token) + _attrs = ("T", _attr_T, "merge_op", merge_op, "final_op", final_op, + "communication_hint", communication_hint, "timeout_seconds", + timeout_seconds, "is_stateless", is_stateless, "Nordering_token", + _attr_Nordering_token, 
"max_subdivs_per_device", max_subdivs_per_device) + _result = _execute.execute(b"CollectiveReduceV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveReduceV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CollectiveReduceV3_T = TypeVar("TV_CollectiveReduceV3_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def collective_reduce_v3(input: Annotated[Any, TV_CollectiveReduceV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], reduction: str, timeout_seconds:float=0, name=None) -> Annotated[Any, TV_CollectiveReduceV3_T]: + r"""Mutually reduces multiple tensors of identical type and shape. + + Args: + input: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`. + communicator: A `Tensor` of type `resource`. + group_assignment: A `Tensor` of type `int32`. + reduction: A `string` from: `"Min", "Max", "Mul", "Add"`. + timeout_seconds: An optional `float`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectiveReduceV3", name, input, communicator, + group_assignment, "reduction", reduction, "timeout_seconds", + timeout_seconds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_reduce_v3_eager_fallback( + input, communicator, group_assignment, reduction=reduction, + timeout_seconds=timeout_seconds, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ reduction = _execute.make_str(reduction, "reduction") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectiveReduceV3", input=input, communicator=communicator, + group_assignment=group_assignment, + reduction=reduction, + timeout_seconds=timeout_seconds, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "reduction", + _op.get_attr("reduction"), "timeout_seconds", + _op.get_attr("timeout_seconds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectiveReduceV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectiveReduceV3 = tf_export("raw_ops.CollectiveReduceV3")(_ops.to_raw_op(collective_reduce_v3)) + + +def collective_reduce_v3_eager_fallback(input: Annotated[Any, TV_CollectiveReduceV3_T], communicator: Annotated[Any, _atypes.Resource], group_assignment: Annotated[Any, _atypes.Int32], reduction: str, timeout_seconds: float, name, ctx) -> Annotated[Any, TV_CollectiveReduceV3_T]: + reduction = _execute.make_str(reduction, "reduction") + if timeout_seconds is None: + timeout_seconds = 0 + timeout_seconds = _execute.make_float(timeout_seconds, "timeout_seconds") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + communicator = _ops.convert_to_tensor(communicator, _dtypes.resource) + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + _inputs_flat = [input, communicator, group_assignment] + _attrs = ("T", _attr_T, "reduction", reduction, "timeout_seconds", + timeout_seconds) + _result = _execute.execute(b"CollectiveReduceV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectiveReduceV3", 
_inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_composite_tensor_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_composite_tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..189c7e6f4ea0cfc2da4d592f67cf1c18291d09e8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_composite_tensor_ops.py @@ -0,0 +1,172 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def composite_tensor_variant_from_components(components, metadata: str, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Encodes an `ExtensionType` value into a `variant` scalar Tensor. + + Returns a scalar variant tensor containing a single `CompositeTensorVariant` + with the specified Tensor components and TypeSpec. + + Args: + components: A list of `Tensor` objects. + The component tensors for the extension type value. + metadata: A `string`. + String serialization for the TypeSpec. 
(Note: the encoding for the TypeSpec + may change in future versions of TensorFlow.) + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CompositeTensorVariantFromComponents", name, components, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return composite_tensor_variant_from_components_eager_fallback( + components, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CompositeTensorVariantFromComponents", components=components, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata"), "Tcomponents", + _op.get_attr("Tcomponents")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CompositeTensorVariantFromComponents", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CompositeTensorVariantFromComponents = tf_export("raw_ops.CompositeTensorVariantFromComponents")(_ops.to_raw_op(composite_tensor_variant_from_components)) + + +def composite_tensor_variant_from_components_eager_fallback(components, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + metadata = _execute.make_str(metadata, "metadata") + _attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(components, ctx) + _inputs_flat = list(components) + _attrs = ("metadata", metadata, "Tcomponents", _attr_Tcomponents) + _result = _execute.execute(b"CompositeTensorVariantFromComponents", 1, + inputs=_inputs_flat, attrs=_attrs, 
ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CompositeTensorVariantFromComponents", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def composite_tensor_variant_to_components(encoded: Annotated[Any, _atypes.Variant], metadata: str, Tcomponents, name=None): + r"""Decodes a `variant` scalar Tensor into an `ExtensionType` value. + + Returns the Tensor components encoded in a `CompositeTensorVariant`. + + Raises an error if `type_spec_proto` doesn't match the TypeSpec + in `encoded`. + + Args: + encoded: A `Tensor` of type `variant`. + A scalar `variant` Tensor containing an encoded ExtensionType value. + metadata: A `string`. + String serialization for the TypeSpec. Must be compatible with the + `TypeSpec` contained in `encoded`. (Note: the encoding for the TypeSpec + may change in future versions of TensorFlow.) + Tcomponents: A list of `tf.DTypes`. Expected dtypes for components. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tcomponents`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CompositeTensorVariantToComponents", name, encoded, "metadata", + metadata, "Tcomponents", Tcomponents) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return composite_tensor_variant_to_components_eager_fallback( + encoded, metadata=metadata, Tcomponents=Tcomponents, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + metadata = _execute.make_str(metadata, "metadata") + if not isinstance(Tcomponents, (list, tuple)): + raise TypeError( + "Expected list for 'Tcomponents' argument to " + "'composite_tensor_variant_to_components' Op, not %r." 
% Tcomponents) + Tcomponents = [_execute.make_type(_t, "Tcomponents") for _t in Tcomponents] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CompositeTensorVariantToComponents", encoded=encoded, + metadata=metadata, + Tcomponents=Tcomponents, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata"), "Tcomponents", + _op.get_attr("Tcomponents")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CompositeTensorVariantToComponents", _inputs_flat, _attrs, _result) + return _result + +CompositeTensorVariantToComponents = tf_export("raw_ops.CompositeTensorVariantToComponents")(_ops.to_raw_op(composite_tensor_variant_to_components)) + + +def composite_tensor_variant_to_components_eager_fallback(encoded: Annotated[Any, _atypes.Variant], metadata: str, Tcomponents, name, ctx): + metadata = _execute.make_str(metadata, "metadata") + if not isinstance(Tcomponents, (list, tuple)): + raise TypeError( + "Expected list for 'Tcomponents' argument to " + "'composite_tensor_variant_to_components' Op, not %r." 
% Tcomponents) + Tcomponents = [_execute.make_type(_t, "Tcomponents") for _t in Tcomponents] + encoded = _ops.convert_to_tensor(encoded, _dtypes.variant) + _inputs_flat = [encoded] + _attrs = ("metadata", metadata, "Tcomponents", Tcomponents) + _result = _execute.execute(b"CompositeTensorVariantToComponents", + len(Tcomponents), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CompositeTensorVariantToComponents", _inputs_flat, _attrs, _result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..575ec77917bc8cf1bbeee5629051ef0401cf0666 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py @@ -0,0 +1,8372 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def anonymous_iterator(output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""A container for an iterator resource. + + Args: + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousIterator", name, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_iterator_eager_fallback( + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousIterator", output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AnonymousIterator = tf_export("raw_ops.AnonymousIterator")(_ops.to_raw_op(anonymous_iterator)) + + +def anonymous_iterator_eager_fallback(output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"AnonymousIterator", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_AnonymousIteratorV2Output = collections.namedtuple( + "AnonymousIteratorV2", + ["handle", "deleter"]) + + +def anonymous_iterator_v2(output_types, output_shapes, name=None): + r"""A container for an iterator resource. + + Args: + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (handle, deleter). + + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousIteratorV2", name, "output_types", output_types, + "output_shapes", output_shapes) + _result = _AnonymousIteratorV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_iterator_v2_eager_fallback( + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator_v2' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousIteratorV2", output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousIteratorV2", _inputs_flat, _attrs, _result) + _result = _AnonymousIteratorV2Output._make(_result) + return _result + +AnonymousIteratorV2 = tf_export("raw_ops.AnonymousIteratorV2")(_ops.to_raw_op(anonymous_iterator_v2)) + + +def anonymous_iterator_v2_eager_fallback(output_types, output_shapes, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"AnonymousIteratorV2", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousIteratorV2", _inputs_flat, _attrs, _result) + _result = _AnonymousIteratorV2Output._make(_result) + return _result + + +def anonymous_iterator_v3(output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""A container for an iterator resource. + + Args: + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousIteratorV3", name, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_iterator_v3_eager_fallback( + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator_v3' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousIteratorV3", output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousIteratorV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AnonymousIteratorV3 = tf_export("raw_ops.AnonymousIteratorV3")(_ops.to_raw_op(anonymous_iterator_v3)) + + +def anonymous_iterator_v3_eager_fallback(output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_iterator_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_iterator_v3' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"AnonymousIteratorV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousIteratorV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_AnonymousMemoryCacheOutput = collections.namedtuple( + "AnonymousMemoryCache", + ["handle", "deleter"]) + + +def anonymous_memory_cache(name=None): + r"""TODO: add doc. + + Args: + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (handle, deleter). 
+ + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousMemoryCache", name) + _result = _AnonymousMemoryCacheOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_memory_cache_eager_fallback( + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousMemoryCache", name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousMemoryCache", _inputs_flat, _attrs, _result) + _result = _AnonymousMemoryCacheOutput._make(_result) + return _result + +AnonymousMemoryCache = tf_export("raw_ops.AnonymousMemoryCache")(_ops.to_raw_op(anonymous_memory_cache)) + + +def anonymous_memory_cache_eager_fallback(name, ctx): + _inputs_flat = [] + _attrs = None + _result = _execute.execute(b"AnonymousMemoryCache", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousMemoryCache", _inputs_flat, _attrs, _result) + _result = _AnonymousMemoryCacheOutput._make(_result) + return _result + +_AnonymousMultiDeviceIteratorOutput = collections.namedtuple( + "AnonymousMultiDeviceIterator", + ["handle", "deleter"]) + + +def anonymous_multi_device_iterator(devices, output_types, output_shapes, name=None): + r"""A container for a multi device iterator resource. + + Args: + devices: A list of `strings` that has length `>= 1`. + output_types: A list of `tf.DTypes` that has length `>= 1`. 
+ output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (handle, deleter). + + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousMultiDeviceIterator", name, "devices", devices, + "output_types", output_types, "output_shapes", output_shapes) + _result = _AnonymousMultiDeviceIteratorOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_multi_device_iterator_eager_fallback( + devices=devices, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'anonymous_multi_device_iterator' Op, not %r." % devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_multi_device_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_multi_device_iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousMultiDeviceIterator", devices=devices, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("devices", _op.get_attr("devices"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousMultiDeviceIterator", _inputs_flat, _attrs, _result) + _result = _AnonymousMultiDeviceIteratorOutput._make(_result) + return _result + +AnonymousMultiDeviceIterator = tf_export("raw_ops.AnonymousMultiDeviceIterator")(_ops.to_raw_op(anonymous_multi_device_iterator)) + + +def anonymous_multi_device_iterator_eager_fallback(devices, output_types, output_shapes, name, ctx): + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'anonymous_multi_device_iterator' Op, not %r." % devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_multi_device_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_multi_device_iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("devices", devices, "output_types", output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"AnonymousMultiDeviceIterator", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousMultiDeviceIterator", _inputs_flat, _attrs, _result) + _result = _AnonymousMultiDeviceIteratorOutput._make(_result) + return _result + + +def anonymous_multi_device_iterator_v3(devices, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""A container for a multi device iterator resource. + + Args: + devices: A list of `strings` that has length `>= 1`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousMultiDeviceIteratorV3", name, "devices", devices, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_multi_device_iterator_v3_eager_fallback( + devices=devices, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." 
% devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousMultiDeviceIteratorV3", devices=devices, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("devices", _op.get_attr("devices"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousMultiDeviceIteratorV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AnonymousMultiDeviceIteratorV3 = tf_export("raw_ops.AnonymousMultiDeviceIteratorV3")(_ops.to_raw_op(anonymous_multi_device_iterator_v3)) + + +def anonymous_multi_device_iterator_v3_eager_fallback(devices, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." % devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'anonymous_multi_device_iterator_v3' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("devices", devices, "output_types", output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"AnonymousMultiDeviceIteratorV3", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousMultiDeviceIteratorV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_AnonymousRandomSeedGeneratorOutput = collections.namedtuple( + "AnonymousRandomSeedGenerator", + ["handle", "deleter"]) + + +def anonymous_random_seed_generator(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], name=None): + r"""TODO: add doc. + + Args: + seed: A `Tensor` of type `int64`. + seed2: A `Tensor` of type `int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (handle, deleter). + + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousRandomSeedGenerator", name, seed, seed2) + _result = _AnonymousRandomSeedGeneratorOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_random_seed_generator_eager_fallback( + seed, seed2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousRandomSeedGenerator", seed=seed, seed2=seed2, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousRandomSeedGenerator", _inputs_flat, _attrs, _result) + _result = _AnonymousRandomSeedGeneratorOutput._make(_result) + return _result + +AnonymousRandomSeedGenerator = tf_export("raw_ops.AnonymousRandomSeedGenerator")(_ops.to_raw_op(anonymous_random_seed_generator)) + + +def anonymous_random_seed_generator_eager_fallback(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], name, ctx): + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + _inputs_flat = [seed, seed2] + _attrs = None + _result = _execute.execute(b"AnonymousRandomSeedGenerator", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousRandomSeedGenerator", _inputs_flat, _attrs, _result) + _result = _AnonymousRandomSeedGeneratorOutput._make(_result) + return _result + +_AnonymousSeedGeneratorOutput = collections.namedtuple( + "AnonymousSeedGenerator", + ["handle", "deleter"]) + + +def anonymous_seed_generator(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], reshuffle: Annotated[Any, _atypes.Bool], name=None): + r"""TODO: add doc. + + Args: + seed: A `Tensor` of type `int64`. + seed2: A `Tensor` of type `int64`. + reshuffle: A `Tensor` of type `bool`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (handle, deleter). + + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AnonymousSeedGenerator", name, seed, seed2, reshuffle) + _result = _AnonymousSeedGeneratorOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return anonymous_seed_generator_eager_fallback( + seed, seed2, reshuffle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AnonymousSeedGenerator", seed=seed, seed2=seed2, reshuffle=reshuffle, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "AnonymousSeedGenerator", _inputs_flat, _attrs, _result) + _result = _AnonymousSeedGeneratorOutput._make(_result) + return _result + +AnonymousSeedGenerator = tf_export("raw_ops.AnonymousSeedGenerator")(_ops.to_raw_op(anonymous_seed_generator)) + + +def anonymous_seed_generator_eager_fallback(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], reshuffle: Annotated[Any, _atypes.Bool], name, ctx): + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + reshuffle = _ops.convert_to_tensor(reshuffle, _dtypes.bool) + _inputs_flat = [seed, seed2, reshuffle] + _attrs = None + _result = _execute.execute(b"AnonymousSeedGenerator", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AnonymousSeedGenerator", _inputs_flat, _attrs, _result) + _result = _AnonymousSeedGeneratorOutput._make(_result) + return _result + + +def batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, 
_atypes.Int64], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches `batch_size` elements from `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchDataset", name, input_dataset, batch_size, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_dataset_eager_fallback( + input_dataset, batch_size, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchDataset", input_dataset=input_dataset, batch_size=batch_size, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BatchDataset = tf_export("raw_ops.BatchDataset")(_ops.to_raw_op(batch_dataset)) + + +def batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + _inputs_flat = [input_dataset, batch_size] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"BatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def batch_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, parallel_copy:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches `batch_size` elements from `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a batch. + drop_remainder: A `Tensor` of type `bool`. + A scalar representing whether the last batch should be dropped in case its size + is smaller than desired. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + parallel_copy: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchDatasetV2", name, input_dataset, batch_size, + drop_remainder, "parallel_copy", parallel_copy, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_dataset_v2_eager_fallback( + input_dataset, batch_size, drop_remainder, + parallel_copy=parallel_copy, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'batch_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'batch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchDatasetV2", input_dataset=input_dataset, batch_size=batch_size, + drop_remainder=drop_remainder, + output_types=output_types, + output_shapes=output_shapes, + parallel_copy=parallel_copy, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("parallel_copy", _op._get_attr_bool("parallel_copy"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BatchDatasetV2 = tf_export("raw_ops.BatchDatasetV2")(_ops.to_raw_op(batch_dataset_v2)) + + +def batch_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, parallel_copy: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'batch_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'batch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset, batch_size, drop_remainder] + _attrs = ("parallel_copy", parallel_copy, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"BatchDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def cache_dataset(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that caches elements from `input_dataset`. + + A CacheDataset will iterate over the input_dataset, and store tensors. If the + cache already exists, the cache will be used. If the cache is inappropriate + (e.g. cannot be opened, contains tensors of the wrong shape / size), an error + will the returned when used. + + Args: + input_dataset: A `Tensor` of type `variant`. + filename: A `Tensor` of type `string`. + A path on the filesystem where we should cache the dataset. Note: this + will be a directory. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). 
+ + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CacheDataset", name, input_dataset, filename, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cache_dataset_eager_fallback( + input_dataset, filename, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'cache_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'cache_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CacheDataset", input_dataset=input_dataset, filename=filename, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CacheDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CacheDataset = tf_export("raw_ops.CacheDataset")(_ops.to_raw_op(cache_dataset)) + + +def cache_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'cache_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'cache_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + filename = _ops.convert_to_tensor(filename, _dtypes.string) + _inputs_flat = [input_dataset, filename] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"CacheDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CacheDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def cache_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], cache: Annotated[Any, _atypes.Resource], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + filename: A `Tensor` of type `string`. + cache: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CacheDatasetV2", name, input_dataset, filename, cache, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cache_dataset_v2_eager_fallback( + input_dataset, filename, cache, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'cache_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'cache_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CacheDatasetV2", input_dataset=input_dataset, filename=filename, + cache=cache, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CacheDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CacheDatasetV2 = tf_export("raw_ops.CacheDatasetV2")(_ops.to_raw_op(cache_dataset_v2)) + + +def cache_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], cache: Annotated[Any, _atypes.Resource], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'cache_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'cache_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + filename = _ops.convert_to_tensor(filename, _dtypes.string) + cache = _ops.convert_to_tensor(cache, _dtypes.resource) + _inputs_flat = [input_dataset, filename, cache] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"CacheDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CacheDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def concatenate_dataset(input_dataset: Annotated[Any, _atypes.Variant], another_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that concatenates `input_dataset` with `another_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + another_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ConcatenateDataset", name, input_dataset, another_dataset, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return concatenate_dataset_eager_fallback( + input_dataset, another_dataset, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'concatenate_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'concatenate_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ConcatenateDataset", input_dataset=input_dataset, + another_dataset=another_dataset, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ConcatenateDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ConcatenateDataset = tf_export("raw_ops.ConcatenateDataset")(_ops.to_raw_op(concatenate_dataset)) + + +def concatenate_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], another_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'concatenate_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'concatenate_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + another_dataset = _ops.convert_to_tensor(another_dataset, _dtypes.variant) + _inputs_flat = [input_dataset, another_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"ConcatenateDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ConcatenateDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_cardinality(input_dataset: Annotated[Any, _atypes.Variant], cardinality_options:str="", name=None) -> Annotated[Any, _atypes.Int64]: + r"""Returns the cardinality of `input_dataset`. + + Returns the cardinality of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to return cardinality for. + cardinality_options: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetCardinality", name, input_dataset, + "cardinality_options", cardinality_options) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_cardinality_eager_fallback( + input_dataset, cardinality_options=cardinality_options, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if cardinality_options is None: + cardinality_options = "" + cardinality_options = _execute.make_str(cardinality_options, "cardinality_options") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetCardinality", input_dataset=input_dataset, + cardinality_options=cardinality_options, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("cardinality_options", _op.get_attr("cardinality_options")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetCardinality", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DatasetCardinality = tf_export("raw_ops.DatasetCardinality")(_ops.to_raw_op(dataset_cardinality)) + + +def dataset_cardinality_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], cardinality_options: str, name, ctx) -> Annotated[Any, _atypes.Int64]: + if cardinality_options is None: + cardinality_options = "" + cardinality_options = _execute.make_str(cardinality_options, "cardinality_options") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("cardinality_options", cardinality_options) + _result = _execute.execute(b"DatasetCardinality", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetCardinality", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_fingerprint(input_dataset: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.UInt64]: + r"""Returns the fingerprint of `input_dataset`. + + Returns the fingerprint of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to return fingerprint for. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `uint64`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetFingerprint", name, input_dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_fingerprint_eager_fallback( + input_dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetFingerprint", input_dataset=input_dataset, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetFingerprint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DatasetFingerprint = tf_export("raw_ops.DatasetFingerprint")(_ops.to_raw_op(dataset_fingerprint)) + + +def dataset_fingerprint_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.UInt64]: + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = None + _result = _execute.execute(b"DatasetFingerprint", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetFingerprint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_to_graph(input_dataset: Annotated[Any, _atypes.Variant], stateful_whitelist=[], allow_stateful:bool=False, strip_device_assignment:bool=False, name=None) -> Annotated[Any, _atypes.String]: + r"""Returns a serialized GraphDef representing `input_dataset`. + + Returns a graph representation for `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. 
+ A variant tensor representing the dataset to return the graph representation for. + stateful_whitelist: An optional list of `strings`. Defaults to `[]`. + allow_stateful: An optional `bool`. Defaults to `False`. + strip_device_assignment: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetToGraph", name, input_dataset, "stateful_whitelist", + stateful_whitelist, "allow_stateful", allow_stateful, + "strip_device_assignment", strip_device_assignment) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_to_graph_eager_fallback( + input_dataset, stateful_whitelist=stateful_whitelist, + allow_stateful=allow_stateful, + strip_device_assignment=strip_device_assignment, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if stateful_whitelist is None: + stateful_whitelist = [] + if not isinstance(stateful_whitelist, (list, tuple)): + raise TypeError( + "Expected list for 'stateful_whitelist' argument to " + "'dataset_to_graph' Op, not %r." 
% stateful_whitelist) + stateful_whitelist = [_execute.make_str(_s, "stateful_whitelist") for _s in stateful_whitelist] + if allow_stateful is None: + allow_stateful = False + allow_stateful = _execute.make_bool(allow_stateful, "allow_stateful") + if strip_device_assignment is None: + strip_device_assignment = False + strip_device_assignment = _execute.make_bool(strip_device_assignment, "strip_device_assignment") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetToGraph", input_dataset=input_dataset, + stateful_whitelist=stateful_whitelist, + allow_stateful=allow_stateful, + strip_device_assignment=strip_device_assignment, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("stateful_whitelist", _op.get_attr("stateful_whitelist"), + "allow_stateful", _op._get_attr_bool("allow_stateful"), + "strip_device_assignment", + _op._get_attr_bool("strip_device_assignment")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetToGraph", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DatasetToGraph = tf_export("raw_ops.DatasetToGraph")(_ops.to_raw_op(dataset_to_graph)) + + +def dataset_to_graph_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], stateful_whitelist, allow_stateful: bool, strip_device_assignment: bool, name, ctx) -> Annotated[Any, _atypes.String]: + if stateful_whitelist is None: + stateful_whitelist = [] + if not isinstance(stateful_whitelist, (list, tuple)): + raise TypeError( + "Expected list for 'stateful_whitelist' argument to " + "'dataset_to_graph' Op, not %r." 
% stateful_whitelist) + stateful_whitelist = [_execute.make_str(_s, "stateful_whitelist") for _s in stateful_whitelist] + if allow_stateful is None: + allow_stateful = False + allow_stateful = _execute.make_bool(allow_stateful, "allow_stateful") + if strip_device_assignment is None: + strip_device_assignment = False + strip_device_assignment = _execute.make_bool(strip_device_assignment, "strip_device_assignment") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("stateful_whitelist", stateful_whitelist, "allow_stateful", + allow_stateful, "strip_device_assignment", strip_device_assignment) + _result = _execute.execute(b"DatasetToGraph", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetToGraph", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_to_graph_v2(input_dataset: Annotated[Any, _atypes.Variant], external_state_policy:int=0, strip_device_assignment:bool=False, name=None) -> Annotated[Any, _atypes.String]: + r"""Returns a serialized GraphDef representing `input_dataset`. + + Returns a graph representation for `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to return the graph representation for. + external_state_policy: An optional `int`. Defaults to `0`. + strip_device_assignment: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetToGraphV2", name, input_dataset, + "external_state_policy", external_state_policy, + "strip_device_assignment", strip_device_assignment) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_to_graph_v2_eager_fallback( + input_dataset, external_state_policy=external_state_policy, + strip_device_assignment=strip_device_assignment, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if external_state_policy is None: + external_state_policy = 0 + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if strip_device_assignment is None: + strip_device_assignment = False + strip_device_assignment = _execute.make_bool(strip_device_assignment, "strip_device_assignment") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetToGraphV2", input_dataset=input_dataset, + external_state_policy=external_state_policy, + strip_device_assignment=strip_device_assignment, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("external_state_policy", + _op._get_attr_int("external_state_policy"), + "strip_device_assignment", + _op._get_attr_bool("strip_device_assignment")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetToGraphV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DatasetToGraphV2 = tf_export("raw_ops.DatasetToGraphV2")(_ops.to_raw_op(dataset_to_graph_v2)) + + +def dataset_to_graph_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], external_state_policy: int, strip_device_assignment: bool, name, ctx) -> Annotated[Any, _atypes.String]: + if external_state_policy 
is None: + external_state_policy = 0 + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if strip_device_assignment is None: + strip_device_assignment = False + strip_device_assignment = _execute.make_bool(strip_device_assignment, "strip_device_assignment") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("external_state_policy", external_state_policy, + "strip_device_assignment", strip_device_assignment) + _result = _execute.execute(b"DatasetToGraphV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetToGraphV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_to_single_element(dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata:str="", name=None): + r"""Outputs the single element from the given dataset. + + Args: + dataset: A `Tensor` of type `variant`. + A handle to a dataset that contains a single element. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetToSingleElement", name, dataset, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_to_single_element_eager_fallback( + dataset, output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'dataset_to_single_element' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'dataset_to_single_element' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetToSingleElement", dataset=dataset, output_types=output_types, + output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetToSingleElement", _inputs_flat, _attrs, _result) + return _result + +DatasetToSingleElement = tf_export("raw_ops.DatasetToSingleElement")(_ops.to_raw_op(dataset_to_single_element)) + + +def dataset_to_single_element_eager_fallback(dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata: str, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'dataset_to_single_element' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'dataset_to_single_element' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + _inputs_flat = [dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"DatasetToSingleElement", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetToSingleElement", _inputs_flat, _attrs, _result) + return _result + + +def delete_iterator(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""A container for an iterator resource. + + Args: + handle: A `Tensor` of type `resource`. A handle to the iterator to delete. + deleter: A `Tensor` of type `variant`. A variant deleter. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteIterator", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return delete_iterator_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteIterator", handle=handle, deleter=deleter, name=name) + return _op +DeleteIterator = tf_export("raw_ops.DeleteIterator")(_ops.to_raw_op(delete_iterator)) + + +def delete_iterator_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteIterator", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def delete_memory_cache(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteMemoryCache", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return delete_memory_cache_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteMemoryCache", handle=handle, deleter=deleter, name=name) + return _op +DeleteMemoryCache = tf_export("raw_ops.DeleteMemoryCache")(_ops.to_raw_op(delete_memory_cache)) + + +def delete_memory_cache_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteMemoryCache", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def delete_multi_device_iterator(multi_device_iterator: Annotated[Any, _atypes.Resource], iterators: Annotated[List[Any], _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""A container for an iterator resource. + + Args: + multi_device_iterator: A `Tensor` of type `resource`. + A handle to the multi device iterator to delete. + iterators: A list of `Tensor` objects with type `resource`. + A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted. + deleter: A `Tensor` of type `variant`. A variant deleter. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteMultiDeviceIterator", name, multi_device_iterator, + iterators, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return delete_multi_device_iterator_eager_fallback( + multi_device_iterator, iterators, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(iterators, (list, tuple)): + raise TypeError( + "Expected list for 'iterators' argument to " + "'delete_multi_device_iterator' Op, not %r." % iterators) + _attr_N = len(iterators) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteMultiDeviceIterator", multi_device_iterator=multi_device_iterator, + iterators=iterators, deleter=deleter, + name=name) + return _op +DeleteMultiDeviceIterator = tf_export("raw_ops.DeleteMultiDeviceIterator")(_ops.to_raw_op(delete_multi_device_iterator)) + + +def delete_multi_device_iterator_eager_fallback(multi_device_iterator: Annotated[Any, _atypes.Resource], iterators: Annotated[List[Any], _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + if not isinstance(iterators, (list, tuple)): + raise TypeError( + "Expected list for 'iterators' argument to " + "'delete_multi_device_iterator' Op, not %r." 
% iterators) + _attr_N = len(iterators) + multi_device_iterator = _ops.convert_to_tensor(multi_device_iterator, _dtypes.resource) + iterators = _ops.convert_n_to_tensor(iterators, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [multi_device_iterator] + list(iterators) + [deleter] + _attrs = ("N", _attr_N) + _result = _execute.execute(b"DeleteMultiDeviceIterator", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def delete_random_seed_generator(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteRandomSeedGenerator", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return delete_random_seed_generator_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteRandomSeedGenerator", handle=handle, deleter=deleter, + name=name) + return _op +DeleteRandomSeedGenerator = tf_export("raw_ops.DeleteRandomSeedGenerator")(_ops.to_raw_op(delete_random_seed_generator)) + + +def delete_random_seed_generator_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteRandomSeedGenerator", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def delete_seed_generator(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteSeedGenerator", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return delete_seed_generator_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteSeedGenerator", handle=handle, deleter=deleter, name=name) + return _op +DeleteSeedGenerator = tf_export("raw_ops.DeleteSeedGenerator")(_ops.to_raw_op(delete_seed_generator)) + + +def delete_seed_generator_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteSeedGenerator", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def deserialize_iterator(resource_handle: Annotated[Any, _atypes.Resource], serialized: Annotated[Any, _atypes.Variant], name=None): + r"""Converts the given variant tensor to an iterator and stores it in the given resource. + + Args: + resource_handle: A `Tensor` of type `resource`. + A handle to an iterator resource. + serialized: A `Tensor` of type `variant`. + A variant tensor storing the state of the iterator contained in the + resource. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeserializeIterator", name, resource_handle, serialized) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return deserialize_iterator_eager_fallback( + resource_handle, serialized, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeserializeIterator", resource_handle=resource_handle, + serialized=serialized, name=name) + return _op +DeserializeIterator = tf_export("raw_ops.DeserializeIterator")(_ops.to_raw_op(deserialize_iterator)) + + +def deserialize_iterator_eager_fallback(resource_handle: Annotated[Any, _atypes.Resource], serialized: Annotated[Any, _atypes.Variant], name, ctx): + resource_handle = _ops.convert_to_tensor(resource_handle, _dtypes.resource) + serialized = _ops.convert_to_tensor(serialized, _dtypes.variant) + _inputs_flat = [resource_handle, serialized] + _attrs = None + _result = _execute.execute(b"DeserializeIterator", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def dummy_memory_cache(name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DummyMemoryCache", name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dummy_memory_cache_eager_fallback( + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DummyMemoryCache", name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DummyMemoryCache", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DummyMemoryCache = tf_export("raw_ops.DummyMemoryCache")(_ops.to_raw_op(dummy_memory_cache)) + + +def dummy_memory_cache_eager_fallback(name, ctx) -> Annotated[Any, _atypes.Resource]: + _inputs_flat = [] + _attrs = None + _result = _execute.execute(b"DummyMemoryCache", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DummyMemoryCache", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dummy_seed_generator(name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DummySeedGenerator", name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dummy_seed_generator_eager_fallback( + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DummySeedGenerator", name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DummySeedGenerator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DummySeedGenerator = tf_export("raw_ops.DummySeedGenerator")(_ops.to_raw_op(dummy_seed_generator)) + + +def dummy_seed_generator_eager_fallback(name, ctx) -> Annotated[Any, _atypes.Resource]: + _inputs_flat = [] + _attrs = None + _result = _execute.execute(b"DummySeedGenerator", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DummySeedGenerator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def filter_by_last_component_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset containing elements of first component of `input_dataset` having true in the last component. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FilterByLastComponentDataset", name, input_dataset, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return filter_by_last_component_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'filter_by_last_component_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'filter_by_last_component_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FilterByLastComponentDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FilterByLastComponentDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FilterByLastComponentDataset = tf_export("raw_ops.FilterByLastComponentDataset")(_ops.to_raw_op(filter_by_last_component_dataset)) + + +def filter_by_last_component_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'filter_by_last_component_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'filter_by_last_component_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"FilterByLastComponentDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FilterByLastComponentDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def filter_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset containing elements of `input_dataset` matching `predicate`. + + The `predicate` function must return a scalar boolean and accept the + following arguments: + + * One tensor for each component of an element of `input_dataset`. + * One tensor for each value in `other_arguments`. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `predicate`. + predicate: A function decorated with @Defun. + A function returning a scalar boolean. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FilterDataset", name, input_dataset, other_arguments, + "predicate", predicate, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return filter_dataset_eager_fallback( + input_dataset, other_arguments, predicate=predicate, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'filter_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'filter_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FilterDataset", input_dataset=input_dataset, + other_arguments=other_arguments, predicate=predicate, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("predicate", _op.get_attr("predicate"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FilterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FilterDataset = tf_export("raw_ops.FilterDataset")(_ops.to_raw_op(filter_dataset)) + + +def filter_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'filter_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'filter_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("predicate", predicate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"FilterDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FilterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def finalize_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, has_captured_ref:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset by applying `tf.data.Options` to `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + has_captured_ref: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FinalizeDataset", name, input_dataset, "has_captured_ref", + has_captured_ref, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return finalize_dataset_eager_fallback( + input_dataset, has_captured_ref=has_captured_ref, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'finalize_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'finalize_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if has_captured_ref is None: + has_captured_ref = False + has_captured_ref = _execute.make_bool(has_captured_ref, "has_captured_ref") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FinalizeDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, + has_captured_ref=has_captured_ref, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("has_captured_ref", _op._get_attr_bool("has_captured_ref"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FinalizeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FinalizeDataset = tf_export("raw_ops.FinalizeDataset")(_ops.to_raw_op(finalize_dataset)) + + +def finalize_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, has_captured_ref: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'finalize_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'finalize_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if has_captured_ref is None: + has_captured_ref = False + has_captured_ref = _execute.make_bool(has_captured_ref, "has_captured_ref") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("has_captured_ref", has_captured_ref, "output_types", + output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"FinalizeDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FinalizeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def fixed_length_record_dataset(filenames: Annotated[Any, _atypes.String], header_bytes: Annotated[Any, _atypes.Int64], record_bytes: Annotated[Any, _atypes.Int64], footer_bytes: Annotated[Any, _atypes.Int64], buffer_size: Annotated[Any, _atypes.Int64], metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the records from one or more binary files. + + Args: + filenames: A `Tensor` of type `string`. + A scalar or a vector containing the name(s) of the file(s) to be + read. + header_bytes: A `Tensor` of type `int64`. + A scalar representing the number of bytes to skip at the + beginning of a file. + record_bytes: A `Tensor` of type `int64`. + A scalar representing the number of bytes in each record. + footer_bytes: A `Tensor` of type `int64`. + A scalar representing the number of bytes to skip at the end + of a file. + buffer_size: A `Tensor` of type `int64`. + A scalar representing the number of bytes to buffer. Must be > 0. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FixedLengthRecordDataset", name, filenames, header_bytes, + record_bytes, footer_bytes, buffer_size, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return fixed_length_record_dataset_eager_fallback( + filenames, header_bytes, record_bytes, footer_bytes, buffer_size, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FixedLengthRecordDataset", filenames=filenames, + header_bytes=header_bytes, + record_bytes=record_bytes, + footer_bytes=footer_bytes, + buffer_size=buffer_size, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FixedLengthRecordDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FixedLengthRecordDataset = tf_export("raw_ops.FixedLengthRecordDataset")(_ops.to_raw_op(fixed_length_record_dataset)) + + +def fixed_length_record_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], header_bytes: Annotated[Any, _atypes.Int64], record_bytes: Annotated[Any, _atypes.Int64], footer_bytes: Annotated[Any, _atypes.Int64], buffer_size: Annotated[Any, _atypes.Int64], metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + header_bytes = 
_ops.convert_to_tensor(header_bytes, _dtypes.int64) + record_bytes = _ops.convert_to_tensor(record_bytes, _dtypes.int64) + footer_bytes = _ops.convert_to_tensor(footer_bytes, _dtypes.int64) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + _inputs_flat = [filenames, header_bytes, record_bytes, footer_bytes, buffer_size] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"FixedLengthRecordDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FixedLengthRecordDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def fixed_length_record_dataset_v2(filenames: Annotated[Any, _atypes.String], header_bytes: Annotated[Any, _atypes.Int64], record_bytes: Annotated[Any, _atypes.Int64], footer_bytes: Annotated[Any, _atypes.Int64], buffer_size: Annotated[Any, _atypes.Int64], compression_type: Annotated[Any, _atypes.String], metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + filenames: A `Tensor` of type `string`. + header_bytes: A `Tensor` of type `int64`. + record_bytes: A `Tensor` of type `int64`. + footer_bytes: A `Tensor` of type `int64`. + buffer_size: A `Tensor` of type `int64`. + compression_type: A `Tensor` of type `string`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FixedLengthRecordDatasetV2", name, filenames, header_bytes, + record_bytes, footer_bytes, buffer_size, compression_type, "metadata", + metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return fixed_length_record_dataset_v2_eager_fallback( + filenames, header_bytes, record_bytes, footer_bytes, buffer_size, + compression_type, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FixedLengthRecordDatasetV2", filenames=filenames, + header_bytes=header_bytes, + record_bytes=record_bytes, + footer_bytes=footer_bytes, + buffer_size=buffer_size, + compression_type=compression_type, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FixedLengthRecordDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FixedLengthRecordDatasetV2 = tf_export("raw_ops.FixedLengthRecordDatasetV2")(_ops.to_raw_op(fixed_length_record_dataset_v2)) + + +def fixed_length_record_dataset_v2_eager_fallback(filenames: Annotated[Any, _atypes.String], header_bytes: Annotated[Any, _atypes.Int64], record_bytes: Annotated[Any, _atypes.Int64], footer_bytes: Annotated[Any, _atypes.Int64], buffer_size: Annotated[Any, _atypes.Int64], compression_type: Annotated[Any, _atypes.String], metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if metadata is None: + metadata = "" + metadata = 
_execute.make_str(metadata, "metadata") + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + header_bytes = _ops.convert_to_tensor(header_bytes, _dtypes.int64) + record_bytes = _ops.convert_to_tensor(record_bytes, _dtypes.int64) + footer_bytes = _ops.convert_to_tensor(footer_bytes, _dtypes.int64) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + _inputs_flat = [filenames, header_bytes, record_bytes, footer_bytes, buffer_size, compression_type] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"FixedLengthRecordDatasetV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FixedLengthRecordDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def flat_map_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + Unlike MapDataset, the `f` in FlatMapDataset is expected to return a + Dataset variant, and FlatMapDataset will flatten successive results + into a single Dataset. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + f: A function decorated with @Defun. + A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FlatMapDataset", name, input_dataset, other_arguments, "f", f, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return flat_map_dataset_eager_fallback( + input_dataset, other_arguments, f=f, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'flat_map_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'flat_map_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FlatMapDataset", input_dataset=input_dataset, + other_arguments=other_arguments, f=f, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FlatMapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FlatMapDataset = tf_export("raw_ops.FlatMapDataset")(_ops.to_raw_op(flat_map_dataset)) + + +def flat_map_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'flat_map_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'flat_map_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"FlatMapDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FlatMapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args, init_func, next_func, finalize_func, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that invokes a function to generate elements. + + Args: + init_func_other_args: A list of `Tensor` objects. + next_func_other_args: A list of `Tensor` objects. + finalize_func_other_args: A list of `Tensor` objects. + init_func: A function decorated with @Defun. + next_func: A function decorated with @Defun. + finalize_func: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GeneratorDataset", name, init_func_other_args, + next_func_other_args, finalize_func_other_args, "init_func", + init_func, "next_func", next_func, "finalize_func", finalize_func, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return generator_dataset_eager_fallback( + init_func_other_args, next_func_other_args, + finalize_func_other_args, init_func=init_func, next_func=next_func, + finalize_func=finalize_func, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'generator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'generator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GeneratorDataset", init_func_other_args=init_func_other_args, + next_func_other_args=next_func_other_args, + finalize_func_other_args=finalize_func_other_args, + init_func=init_func, next_func=next_func, + finalize_func=finalize_func, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("init_func", _op.get_attr("init_func"), "next_func", + _op.get_attr("next_func"), "finalize_func", + _op.get_attr("finalize_func"), "Tinit_func_args", + _op.get_attr("Tinit_func_args"), "Tnext_func_args", + _op.get_attr("Tnext_func_args"), "Tfinalize_func_args", + _op.get_attr("Tfinalize_func_args"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "GeneratorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +GeneratorDataset = tf_export("raw_ops.GeneratorDataset")(_ops.to_raw_op(generator_dataset)) + + +def generator_dataset_eager_fallback(init_func_other_args, next_func_other_args, finalize_func_other_args, init_func, next_func, finalize_func, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'generator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'generator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Tinit_func_args, init_func_other_args = _execute.convert_to_mixed_eager_tensors(init_func_other_args, ctx) + _attr_Tnext_func_args, next_func_other_args = _execute.convert_to_mixed_eager_tensors(next_func_other_args, ctx) + _attr_Tfinalize_func_args, finalize_func_other_args = _execute.convert_to_mixed_eager_tensors(finalize_func_other_args, ctx) + _inputs_flat = list(init_func_other_args) + list(next_func_other_args) + list(finalize_func_other_args) + _attrs = ("init_func", init_func, "next_func", next_func, "finalize_func", + finalize_func, "Tinit_func_args", _attr_Tinit_func_args, "Tnext_func_args", + _attr_Tnext_func_args, "Tfinalize_func_args", _attr_Tfinalize_func_args, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"GeneratorDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GeneratorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def get_options(input_dataset: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.String]: + r"""Returns the `tf.data.Options` attached to `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GetOptions", name, input_dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return get_options_eager_fallback( + input_dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GetOptions", input_dataset=input_dataset, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "GetOptions", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +GetOptions = tf_export("raw_ops.GetOptions")(_ops.to_raw_op(get_options)) + + +def get_options_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.String]: + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = None + _result = _execute.execute(b"GetOptions", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GetOptions", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def interleave_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + Unlike MapDataset, the `f` in InterleaveDataset is expected to return + a Dataset variant, and InterleaveDataset will flatten successive + results into a single Dataset. 
Unlike FlatMapDataset, + InterleaveDataset will interleave sequences of up to `block_length` + consecutive elements from `cycle_length` input elements. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + cycle_length: A `Tensor` of type `int64`. + block_length: A `Tensor` of type `int64`. + f: A function decorated with @Defun. + A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InterleaveDataset", name, input_dataset, other_arguments, + cycle_length, block_length, "f", f, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return interleave_dataset_eager_fallback( + input_dataset, other_arguments, cycle_length, block_length, f=f, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'interleave_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'interleave_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InterleaveDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + cycle_length=cycle_length, + block_length=block_length, f=f, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "InterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +InterleaveDataset = tf_export("raw_ops.InterleaveDataset")(_ops.to_raw_op(interleave_dataset)) + + +def interleave_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64) + block_length = _ops.convert_to_tensor(block_length, _dtypes.int64) + _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length] + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"InterleaveDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "InterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator(shared_name: str, container: str, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""A container for an iterator resource. + + Args: + shared_name: A `string`. + container: A `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Iterator", name, "shared_name", shared_name, "container", + container, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_eager_fallback( + shared_name=shared_name, container=container, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Iterator", shared_name=shared_name, container=container, + output_types=output_types, output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("shared_name", _op.get_attr("shared_name"), "container", + _op.get_attr("container"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Iterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Iterator = tf_export("raw_ops.Iterator")(_ops.to_raw_op(iterator)) + + +def iterator_eager_fallback(shared_name: str, container: str, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("shared_name", shared_name, "container", container, + "output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"Iterator", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Iterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator_from_string_handle(string_handle: Annotated[Any, _atypes.String], output_types=[], output_shapes=[], name=None) -> Annotated[Any, _atypes.Resource]: + r"""Converts the given string representing a handle to an iterator to a resource. + + Args: + string_handle: A `Tensor` of type `string`. + A string representation of the given handle. + output_types: An optional list of `tf.DTypes`. Defaults to `[]`. + If specified, defines the type of each tuple component in an + element produced by the resulting iterator. + output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + If specified, defines the shape of each tuple component in an + element produced by the resulting iterator. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorFromStringHandle", name, string_handle, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_from_string_handle_eager_fallback( + string_handle, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ # Add nodes to the TensorFlow graph. + if output_types is None: + output_types = [] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_from_string_handle' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if output_shapes is None: + output_shapes = [] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_from_string_handle' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorFromStringHandle", string_handle=string_handle, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorFromStringHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorFromStringHandle = tf_export("raw_ops.IteratorFromStringHandle")(_ops.to_raw_op(iterator_from_string_handle)) + + +def iterator_from_string_handle_eager_fallback(string_handle: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if output_types is None: + output_types = [] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_from_string_handle' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if output_shapes is None: + output_shapes = [] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_from_string_handle' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + string_handle = _ops.convert_to_tensor(string_handle, _dtypes.string) + _inputs_flat = [string_handle] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorFromStringHandle", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorFromStringHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator_from_string_handle_v2(string_handle: Annotated[Any, _atypes.String], output_types=[], output_shapes=[], name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + string_handle: A `Tensor` of type `string`. + output_types: An optional list of `tf.DTypes`. Defaults to `[]`. + output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorFromStringHandleV2", name, string_handle, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_from_string_handle_v2_eager_fallback( + string_handle, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if output_types is None: + output_types = [] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_from_string_handle_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if output_shapes is None: + output_shapes = [] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_from_string_handle_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorFromStringHandleV2", string_handle=string_handle, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorFromStringHandleV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorFromStringHandleV2 = tf_export("raw_ops.IteratorFromStringHandleV2")(_ops.to_raw_op(iterator_from_string_handle_v2)) + + +def iterator_from_string_handle_v2_eager_fallback(string_handle: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if output_types is None: + output_types = [] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_from_string_handle_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if output_shapes is None: + output_shapes = [] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_from_string_handle_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + string_handle = _ops.convert_to_tensor(string_handle, _dtypes.string) + _inputs_flat = [string_handle] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorFromStringHandleV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorFromStringHandleV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator_get_next(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name=None): + r"""Gets the next output from the given iterator . + + Args: + iterator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorGetNext", name, iterator, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_get_next_eager_fallback( + iterator, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorGetNext", iterator=iterator, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorGetNext", _inputs_flat, _attrs, _result) + return _result + +IteratorGetNext = tf_export("raw_ops.IteratorGetNext")(_ops.to_raw_op(iterator_get_next)) + + +def iterator_get_next_eager_fallback(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [iterator] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorGetNext", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorGetNext", _inputs_flat, _attrs, _result) + return _result + + +def iterator_get_next_as_optional(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Gets the next output from the given iterator as an Optional variant. + + Args: + iterator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorGetNextAsOptional", name, iterator, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_get_next_as_optional_eager_fallback( + iterator, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next_as_optional' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next_as_optional' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorGetNextAsOptional", iterator=iterator, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorGetNextAsOptional", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorGetNextAsOptional = tf_export("raw_ops.IteratorGetNextAsOptional")(_ops.to_raw_op(iterator_get_next_as_optional)) + + +def iterator_get_next_as_optional_eager_fallback(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next_as_optional' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next_as_optional' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [iterator] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorGetNextAsOptional", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorGetNextAsOptional", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator_get_next_sync(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name=None): + r"""Gets the next output from the given iterator. + + This operation is a synchronous version IteratorGetNext. It should only be used + in situations where the iterator does not block the calling thread, or where + the calling thread is not a member of the thread pool used to execute parallel + operations (e.g. in eager mode). + + Args: + iterator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorGetNextSync", name, iterator, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_get_next_sync_eager_fallback( + iterator, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next_sync' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next_sync' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorGetNextSync", iterator=iterator, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorGetNextSync", _inputs_flat, _attrs, _result) + return _result + +IteratorGetNextSync = tf_export("raw_ops.IteratorGetNextSync")(_ops.to_raw_op(iterator_get_next_sync)) + + +def iterator_get_next_sync_eager_fallback(iterator: Annotated[Any, _atypes.Resource], output_types, output_shapes, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_get_next_sync' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_get_next_sync' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [iterator] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorGetNextSync", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorGetNextSync", _inputs_flat, _attrs, _result) + return _result + + +def iterator_to_string_handle(resource_handle: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]: + r"""Converts the given `resource_handle` representing an iterator to a string. + + Args: + resource_handle: A `Tensor` of type `resource`. + A handle to an iterator resource. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorToStringHandle", name, resource_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_to_string_handle_eager_fallback( + resource_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorToStringHandle", resource_handle=resource_handle, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorToStringHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorToStringHandle = tf_export("raw_ops.IteratorToStringHandle")(_ops.to_raw_op(iterator_to_string_handle)) + + +def iterator_to_string_handle_eager_fallback(resource_handle: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]: + resource_handle = _ops.convert_to_tensor(resource_handle, _dtypes.resource) + _inputs_flat = [resource_handle] + _attrs = None + _result = _execute.execute(b"IteratorToStringHandle", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorToStringHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def iterator_v2(shared_name: str, container: str, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + shared_name: A `string`. + container: A `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorV2", name, "shared_name", shared_name, "container", + container, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_v2_eager_fallback( + shared_name=shared_name, container=container, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorV2", shared_name=shared_name, container=container, + output_types=output_types, output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("shared_name", _op.get_attr("shared_name"), "container", + _op.get_attr("container"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorV2 = tf_export("raw_ops.IteratorV2")(_ops.to_raw_op(iterator_v2)) + + +def iterator_v2_eager_fallback(shared_name: str, container: str, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'iterator_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'iterator_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("shared_name", shared_name, "container", container, + "output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"IteratorV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def make_iterator(dataset: Annotated[Any, _atypes.Variant], iterator: Annotated[Any, _atypes.Resource], name=None): + r"""Makes a new iterator from the given `dataset` and stores it in `iterator`. + + This operation may be executed multiple times. Each execution will reset the + iterator in `iterator` to the first element of `dataset`. + + Args: + dataset: A `Tensor` of type `variant`. + iterator: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MakeIterator", name, dataset, iterator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return make_iterator_eager_fallback( + dataset, iterator, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MakeIterator", dataset=dataset, iterator=iterator, name=name) + return _op +MakeIterator = tf_export("raw_ops.MakeIterator")(_ops.to_raw_op(make_iterator)) + + +def make_iterator_eager_fallback(dataset: Annotated[Any, _atypes.Variant], iterator: Annotated[Any, _atypes.Resource], name, ctx): + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [dataset, iterator] + _attrs = None + _result = _execute.execute(b"MakeIterator", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def map_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, use_inter_op_parallelism:bool=True, preserve_cardinality:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + f: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + use_inter_op_parallelism: An optional `bool`. Defaults to `True`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MapDataset", name, input_dataset, other_arguments, "f", f, + "output_types", output_types, "output_shapes", output_shapes, + "use_inter_op_parallelism", use_inter_op_parallelism, + "preserve_cardinality", preserve_cardinality, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return map_dataset_eager_fallback( + input_dataset, other_arguments, f=f, output_types=output_types, + output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + preserve_cardinality=preserve_cardinality, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MapDataset", input_dataset=input_dataset, + other_arguments=other_arguments, f=f, + output_types=output_types, output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + preserve_cardinality=preserve_cardinality, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "use_inter_op_parallelism", + _op._get_attr_bool("use_inter_op_parallelism"), + "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MapDataset = tf_export("raw_ops.MapDataset")(_ops.to_raw_op(map_dataset)) + + +def map_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, use_inter_op_parallelism: bool, preserve_cardinality: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "use_inter_op_parallelism", + use_inter_op_parallelism, "preserve_cardinality", preserve_cardinality, + "metadata", metadata) + _result = _execute.execute(b"MapDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def map_defun(arguments, captured_inputs, output_types, output_shapes, f, max_intra_op_parallelism:int=1, name=None): + r""" Maps a function on the list of tensors unpacked from arguments on dimension 0. + The function given by `f` is assumed to be stateless, and is executed + concurrently on all the slices; up to batch_size (i.e. the size of the 0th + dimension of each argument) functions will be scheduled at once. 
+ + The `max_intra_op_parallelism` attr, which defaults to 1, can be used to + limit the intra op parallelism. To limit inter-op parallelism, a user can + set a private threadpool on the dataset using `tf.data.Options`'s + `ThreadingOptions`. + + Note that this op is not exposed to users directly, but is invoked in tf.data + rewrites. + + Args: + arguments: A list of `Tensor` objects. + A list of tensors whose types are `Targuments`, corresponding to the inputs + the function should be mapped over. + captured_inputs: A list of `Tensor` objects. + A list of tensors whose types are `Tcaptured`, corresponding to the captured + inputs of the defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + A list of types. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + A list of shapes. + f: A function decorated with @Defun. + max_intra_op_parallelism: An optional `int`. Defaults to `1`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MapDefun", name, arguments, captured_inputs, "output_types", + output_types, "output_shapes", output_shapes, "f", f, + "max_intra_op_parallelism", max_intra_op_parallelism) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return map_defun_eager_fallback( + arguments, captured_inputs, output_types=output_types, + output_shapes=output_shapes, f=f, + max_intra_op_parallelism=max_intra_op_parallelism, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_defun' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_defun' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MapDefun", arguments=arguments, captured_inputs=captured_inputs, + output_types=output_types, output_shapes=output_shapes, + f=f, max_intra_op_parallelism=max_intra_op_parallelism, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Targuments", _op.get_attr("Targuments"), "Tcaptured", + _op.get_attr("Tcaptured"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "f", _op.get_attr("f"), + "max_intra_op_parallelism", + _op._get_attr_int("max_intra_op_parallelism")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MapDefun", _inputs_flat, _attrs, _result) + return _result + +MapDefun = tf_export("raw_ops.MapDefun")(_ops.to_raw_op(map_defun)) + + +def map_defun_eager_fallback(arguments, captured_inputs, output_types, output_shapes, f, max_intra_op_parallelism: int, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_defun' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_defun' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + _attr_Targuments, arguments = _execute.convert_to_mixed_eager_tensors(arguments, ctx) + _attr_Tcaptured, captured_inputs = _execute.convert_to_mixed_eager_tensors(captured_inputs, ctx) + _inputs_flat = list(arguments) + list(captured_inputs) + _attrs = ("Targuments", _attr_Targuments, "Tcaptured", _attr_Tcaptured, + "output_types", output_types, "output_shapes", output_shapes, "f", f, + "max_intra_op_parallelism", max_intra_op_parallelism) + _result = _execute.execute(b"MapDefun", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MapDefun", _inputs_flat, _attrs, _result) + return _result + + +def model_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, algorithm:int=0, cpu_budget:int=0, ram_budget:int=0, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Identity transformation that models performance. + + Identity transformation that models performance. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + algorithm: An optional `int`. Defaults to `0`. + cpu_budget: An optional `int`. Defaults to `0`. + ram_budget: An optional `int`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ModelDataset", name, input_dataset, "algorithm", algorithm, + "cpu_budget", cpu_budget, "ram_budget", ram_budget, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return model_dataset_eager_fallback( + input_dataset, algorithm=algorithm, cpu_budget=cpu_budget, + ram_budget=ram_budget, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'model_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'model_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if algorithm is None: + algorithm = 0 + algorithm = _execute.make_int(algorithm, "algorithm") + if cpu_budget is None: + cpu_budget = 0 + cpu_budget = _execute.make_int(cpu_budget, "cpu_budget") + if ram_budget is None: + ram_budget = 0 + ram_budget = _execute.make_int(ram_budget, "ram_budget") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ModelDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, algorithm=algorithm, + cpu_budget=cpu_budget, ram_budget=ram_budget, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("algorithm", _op._get_attr_int("algorithm"), "cpu_budget", + _op._get_attr_int("cpu_budget"), "ram_budget", + _op._get_attr_int("ram_budget"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ModelDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ModelDataset = tf_export("raw_ops.ModelDataset")(_ops.to_raw_op(model_dataset)) + + +def model_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, algorithm: int, cpu_budget: int, ram_budget: int, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'model_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'model_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if algorithm is None: + algorithm = 0 + algorithm = _execute.make_int(algorithm, "algorithm") + if cpu_budget is None: + cpu_budget = 0 + cpu_budget = _execute.make_int(cpu_budget, "cpu_budget") + if ram_budget is None: + ram_budget = 0 + ram_budget = _execute.make_int(ram_budget, "ram_budget") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("algorithm", algorithm, "cpu_budget", cpu_budget, "ram_budget", + ram_budget, "output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ModelDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ModelDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def multi_device_iterator(devices, shared_name: str, container: str, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a MultiDeviceIterator resource. + + Args: + devices: A list of `strings` that has length `>= 1`. + A list of devices the iterator works across. + shared_name: A `string`. + If non-empty, this resource will be shared under the given name + across multiple sessions. + container: A `string`. + If non-empty, this resource is placed in the given container. + Otherwise, a default container is used. + output_types: A list of `tf.DTypes` that has length `>= 1`. + The type list for the return values. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + The list of shapes being produced. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MultiDeviceIterator", name, "devices", devices, "shared_name", + shared_name, "container", container, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return multi_device_iterator_eager_fallback( + devices=devices, shared_name=shared_name, container=container, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'multi_device_iterator' Op, not %r." % devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'multi_device_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'multi_device_iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MultiDeviceIterator", devices=devices, shared_name=shared_name, + container=container, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("devices", _op.get_attr("devices"), "shared_name", + _op.get_attr("shared_name"), "container", + _op.get_attr("container"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MultiDeviceIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MultiDeviceIterator = tf_export("raw_ops.MultiDeviceIterator")(_ops.to_raw_op(multi_device_iterator)) + + +def multi_device_iterator_eager_fallback(devices, shared_name: str, container: str, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]: + if not isinstance(devices, (list, tuple)): + raise TypeError( + "Expected list for 'devices' argument to " + "'multi_device_iterator' Op, not %r." % devices) + devices = [_execute.make_str(_s, "devices") for _s in devices] + shared_name = _execute.make_str(shared_name, "shared_name") + container = _execute.make_str(container, "container") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'multi_device_iterator' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'multi_device_iterator' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _inputs_flat = [] + _attrs = ("devices", devices, "shared_name", shared_name, "container", + container, "output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"MultiDeviceIterator", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MultiDeviceIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def multi_device_iterator_from_string_handle(string_handle: Annotated[Any, _atypes.String], output_types=[], output_shapes=[], name=None) -> Annotated[Any, _atypes.Resource]: + r"""Generates a MultiDeviceIterator resource from its provided string handle. + + Args: + string_handle: A `Tensor` of type `string`. + String representing the resource. + output_types: An optional list of `tf.DTypes`. Defaults to `[]`. + The type list for the return values. + output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + The list of shapes being produced. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MultiDeviceIteratorFromStringHandle", name, string_handle, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return multi_device_iterator_from_string_handle_eager_fallback( + string_handle, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
def multi_device_iterator_from_string_handle_eager_fallback(string_handle: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Resource]:
  """Eager fallback for MultiDeviceIteratorFromStringHandle.

  Called when the fast-path dispatch raises `_FallbackException`; validates
  attrs, converts the input tensor, and executes the op eagerly.
  """
  if output_types is None:
    output_types = []
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'multi_device_iterator_from_string_handle' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if output_shapes is None:
    output_shapes = []
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'multi_device_iterator_from_string_handle' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  # Sole tensor input: the string handle identifying the resource.
  string_handle = _ops.convert_to_tensor(string_handle, _dtypes.string)
  _inputs_flat = [string_handle]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"MultiDeviceIteratorFromStringHandle", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MultiDeviceIteratorFromStringHandle", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def multi_device_iterator_get_next_from_shard(multi_device_iterator: Annotated[Any, _atypes.Resource], shard_num: Annotated[Any, _atypes.Int32], incarnation_id: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None):
  r"""Gets next element for the provided shard number.

  Args:
    multi_device_iterator: A `Tensor` of type `resource`.
      A MultiDeviceIterator resource.
    shard_num: A `Tensor` of type `int32`.
      Integer representing which shard to fetch data for.
    incarnation_id: A `Tensor` of type `int64`.
      Which incarnation of the MultiDeviceIterator is running.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
      The type list for the return values.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
      The list of shapes being produced.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `output_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MultiDeviceIteratorGetNextFromShard", name,
        multi_device_iterator, shard_num, incarnation_id, "output_types",
        output_types, "output_shapes", output_shapes)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return multi_device_iterator_get_next_from_shard_eager_fallback(
          multi_device_iterator, shard_num, incarnation_id,
          output_types=output_types, output_shapes=output_shapes, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'multi_device_iterator_get_next_from_shard' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'multi_device_iterator_get_next_from_shard' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MultiDeviceIteratorGetNextFromShard", multi_device_iterator=multi_device_iterator,
                                               shard_num=shard_num,
                                               incarnation_id=incarnation_id,
                                               output_types=output_types,
                                               output_shapes=output_shapes,
                                               name=name)
  _result = _outputs[:]
  # List-valued output: if the op produced no outputs, return the op itself.
  if not _result:
    return _op
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MultiDeviceIteratorGetNextFromShard", _inputs_flat, _attrs, _result)
  return _result

MultiDeviceIteratorGetNextFromShard = tf_export("raw_ops.MultiDeviceIteratorGetNextFromShard")(_ops.to_raw_op(multi_device_iterator_get_next_from_shard))
def multi_device_iterator_get_next_from_shard_eager_fallback(multi_device_iterator: Annotated[Any, _atypes.Resource], shard_num: Annotated[Any, _atypes.Int32], incarnation_id: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx):
  """Eager fallback for MultiDeviceIteratorGetNextFromShard.

  Validates attrs, converts the three tensor inputs, and executes the op
  eagerly.  Returns the list of fetched element tensors.
  """
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'multi_device_iterator_get_next_from_shard' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'multi_device_iterator_get_next_from_shard' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  multi_device_iterator = _ops.convert_to_tensor(multi_device_iterator, _dtypes.resource)
  shard_num = _ops.convert_to_tensor(shard_num, _dtypes.int32)
  incarnation_id = _ops.convert_to_tensor(incarnation_id, _dtypes.int64)
  _inputs_flat = [multi_device_iterator, shard_num, incarnation_id]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  # The op produces one output per entry in output_types.
  _result = _execute.execute(b"MultiDeviceIteratorGetNextFromShard",
                             len(output_types), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MultiDeviceIteratorGetNextFromShard", _inputs_flat, _attrs, _result)
  return _result
def multi_device_iterator_init(dataset: Annotated[Any, _atypes.Variant], multi_device_iterator: Annotated[Any, _atypes.Resource], max_buffer_size: Annotated[Any, _atypes.Int64], name=None) -> Annotated[Any, _atypes.Int64]:
  r"""Initializes the multi device iterator with the given dataset.

  Args:
    dataset: A `Tensor` of type `variant`. Dataset to be iterated upon.
    multi_device_iterator: A `Tensor` of type `resource`.
      A MultiDeviceIteratorResource.
    max_buffer_size: A `Tensor` of type `int64`.
      The maximum size of the host side per device buffer to keep.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MultiDeviceIteratorInit", name, dataset, multi_device_iterator,
        max_buffer_size)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return multi_device_iterator_init_eager_fallback(
          dataset, multi_device_iterator, max_buffer_size, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MultiDeviceIteratorInit", dataset=dataset,
                                   multi_device_iterator=multi_device_iterator,
                                   max_buffer_size=max_buffer_size, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # This op declares no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MultiDeviceIteratorInit", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

MultiDeviceIteratorInit = tf_export("raw_ops.MultiDeviceIteratorInit")(_ops.to_raw_op(multi_device_iterator_init))
def multi_device_iterator_init_eager_fallback(dataset: Annotated[Any, _atypes.Variant], multi_device_iterator: Annotated[Any, _atypes.Resource], max_buffer_size: Annotated[Any, _atypes.Int64], name, ctx) -> Annotated[Any, _atypes.Int64]:
  """Eager fallback for MultiDeviceIteratorInit.

  Converts the three tensor inputs and executes the op eagerly; returns
  the single `int64` incarnation-id tensor produced by the op.
  """
  dataset = _ops.convert_to_tensor(dataset, _dtypes.variant)
  multi_device_iterator = _ops.convert_to_tensor(multi_device_iterator, _dtypes.resource)
  max_buffer_size = _ops.convert_to_tensor(max_buffer_size, _dtypes.int64)
  _inputs_flat = [dataset, multi_device_iterator, max_buffer_size]
  # No attrs for this op.
  _attrs = None
  _result = _execute.execute(b"MultiDeviceIteratorInit", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MultiDeviceIteratorInit", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def multi_device_iterator_to_string_handle(multi_device_iterator: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]:
  r"""Produces a string handle for the given MultiDeviceIterator.

  Args:
    multi_device_iterator: A `Tensor` of type `resource`.
      A MultiDeviceIterator resource.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MultiDeviceIteratorToStringHandle", name,
        multi_device_iterator)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return multi_device_iterator_to_string_handle_eager_fallback(
          multi_device_iterator, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MultiDeviceIteratorToStringHandle", multi_device_iterator=multi_device_iterator,
                                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # This op declares no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MultiDeviceIteratorToStringHandle", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

MultiDeviceIteratorToStringHandle = tf_export("raw_ops.MultiDeviceIteratorToStringHandle")(_ops.to_raw_op(multi_device_iterator_to_string_handle))


def multi_device_iterator_to_string_handle_eager_fallback(multi_device_iterator: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]:
  """Eager fallback for MultiDeviceIteratorToStringHandle.

  Converts the resource input and executes the op eagerly; returns the
  single `string` handle tensor produced by the op.
  """
  multi_device_iterator = _ops.convert_to_tensor(multi_device_iterator, _dtypes.resource)
  _inputs_flat = [multi_device_iterator]
  # No attrs for this op.
  _attrs = None
  _result = _execute.execute(b"MultiDeviceIteratorToStringHandle", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MultiDeviceIteratorToStringHandle", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def one_shot_iterator(dataset_factory, output_types, output_shapes, container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]:
  r"""Makes a "one-shot" iterator that can be iterated only once.

  A one-shot iterator bundles the logic for defining the dataset and
  the state of the iterator in a single op, which allows simple input
  pipelines to be defined without an additional initialization
  ("MakeIterator") step.

  One-shot iterators have the following limitations:

  * They do not support parameterization: all logic for creating the underlying
    dataset must be bundled in the `dataset_factory` function.
  * They are not resettable. Once a one-shot iterator reaches the end of its
    underlying dataset, subsequent "IteratorGetNext" operations on that
    iterator will always produce an `OutOfRange` error.

  For greater flexibility, use "Iterator" and "MakeIterator" to define
  an iterator using an arbitrary subgraph, which may capture tensors
  (including fed values) as parameters, and which may be reset multiple
  times by rerunning "MakeIterator".

  Args:
    dataset_factory: A function decorated with @Defun.
      A function of type `() -> DT_VARIANT`, where the returned
      DT_VARIANT is a dataset.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OneShotIterator", name, "dataset_factory", dataset_factory,
        "output_types", output_types, "output_shapes", output_shapes,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return one_shot_iterator_eager_fallback(
          dataset_factory=dataset_factory, output_types=output_types,
          output_shapes=output_shapes, container=container,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'one_shot_iterator' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'one_shot_iterator' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OneShotIterator", dataset_factory=dataset_factory,
                           output_types=output_types,
                           output_shapes=output_shapes, container=container,
                           shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dataset_factory", _op.get_attr("dataset_factory"),
              "output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OneShotIterator", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

OneShotIterator = tf_export("raw_ops.OneShotIterator")(_ops.to_raw_op(one_shot_iterator))
def one_shot_iterator_eager_fallback(dataset_factory, output_types, output_shapes, container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]:
  """Eager fallback for OneShotIterator.

  The op takes no tensor inputs; all arguments (including the
  `dataset_factory` function attr) are passed as op attrs.
  """
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'one_shot_iterator' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'one_shot_iterator' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []
  _attrs = ("dataset_factory", dataset_factory, "output_types", output_types,
  "output_shapes", output_shapes, "container", container, "shared_name",
  shared_name)
  _result = _execute.execute(b"OneShotIterator", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OneShotIterator", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def optimize_dataset(input_dataset: Annotated[Any, _atypes.Variant], optimizations: Annotated[Any, _atypes.String], output_types, output_shapes, optimization_configs=[], name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset by applying optimizations to `input_dataset`.

  Creates a dataset by applying optimizations to `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
    optimizations: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying optimizations to use.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    optimization_configs: An optional list of `strings`. Defaults to `[]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  # NOTE: the mutable list default is never mutated; it is re-validated and
  # rebuilt below before use.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OptimizeDataset", name, input_dataset, optimizations,
        "output_types", output_types, "output_shapes", output_shapes,
        "optimization_configs", optimization_configs)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return optimize_dataset_eager_fallback(
          input_dataset, optimizations, output_types=output_types,
          output_shapes=output_shapes,
          optimization_configs=optimization_configs, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'optimize_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'optimize_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if optimization_configs is None:
    optimization_configs = []
  if not isinstance(optimization_configs, (list, tuple)):
    raise TypeError(
        "Expected list for 'optimization_configs' argument to "
        "'optimize_dataset' Op, not %r."
        % optimization_configs)
  optimization_configs = [_execute.make_str(_s, "optimization_configs") for _s in optimization_configs]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OptimizeDataset", input_dataset=input_dataset,
                           optimizations=optimizations,
                           output_types=output_types,
                           output_shapes=output_shapes,
                           optimization_configs=optimization_configs,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "optimization_configs",
              _op.get_attr("optimization_configs"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OptimizeDataset", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

OptimizeDataset = tf_export("raw_ops.OptimizeDataset")(_ops.to_raw_op(optimize_dataset))
def optimize_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], optimizations: Annotated[Any, _atypes.String], output_types, output_shapes, optimization_configs, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager fallback for OptimizeDataset.

  Validates attrs, converts the two tensor inputs, and executes the op
  eagerly; returns the single `variant` dataset tensor.
  """
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'optimize_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'optimize_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if optimization_configs is None:
    optimization_configs = []
  if not isinstance(optimization_configs, (list, tuple)):
    raise TypeError(
        "Expected list for 'optimization_configs' argument to "
        "'optimize_dataset' Op, not %r."
        % optimization_configs)
  optimization_configs = [_execute.make_str(_s, "optimization_configs") for _s in optimization_configs]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  optimizations = _ops.convert_to_tensor(optimizations, _dtypes.string)
  _inputs_flat = [input_dataset, optimizations]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes,
  "optimization_configs", optimization_configs)
  _result = _execute.execute(b"OptimizeDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OptimizeDataset", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def optimize_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], optimizations_enabled: Annotated[Any, _atypes.String], optimizations_disabled: Annotated[Any, _atypes.String], optimizations_default: Annotated[Any, _atypes.String], output_types, output_shapes, optimization_configs=[], name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset by applying related optimizations to `input_dataset`.

  Creates a dataset by applying related optimizations to `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
    optimizations_enabled: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
    optimizations_disabled: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
    optimizations_default: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying optimizations by default.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    optimization_configs: An optional list of `strings`. Defaults to `[]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  # NOTE: the mutable list default is never mutated; it is re-validated and
  # rebuilt below before use.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OptimizeDatasetV2", name, input_dataset, optimizations_enabled,
        optimizations_disabled, optimizations_default, "output_types",
        output_types, "output_shapes", output_shapes, "optimization_configs",
        optimization_configs)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return optimize_dataset_v2_eager_fallback(
          input_dataset, optimizations_enabled, optimizations_disabled,
          optimizations_default, output_types=output_types,
          output_shapes=output_shapes,
          optimization_configs=optimization_configs, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'optimize_dataset_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'optimize_dataset_v2' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if optimization_configs is None:
    optimization_configs = []
  if not isinstance(optimization_configs, (list, tuple)):
    raise TypeError(
        "Expected list for 'optimization_configs' argument to "
        "'optimize_dataset_v2' Op, not %r."
        % optimization_configs)
  optimization_configs = [_execute.make_str(_s, "optimization_configs") for _s in optimization_configs]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OptimizeDatasetV2", input_dataset=input_dataset,
                             optimizations_enabled=optimizations_enabled,
                             optimizations_disabled=optimizations_disabled,
                             optimizations_default=optimizations_default,
                             output_types=output_types,
                             output_shapes=output_shapes,
                             optimization_configs=optimization_configs,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "optimization_configs",
              _op.get_attr("optimization_configs"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OptimizeDatasetV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

OptimizeDatasetV2 = tf_export("raw_ops.OptimizeDatasetV2")(_ops.to_raw_op(optimize_dataset_v2))
def optimize_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], optimizations_enabled: Annotated[Any, _atypes.String], optimizations_disabled: Annotated[Any, _atypes.String], optimizations_default: Annotated[Any, _atypes.String], output_types, output_shapes, optimization_configs, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager fallback for OptimizeDatasetV2.

  Validates attrs, converts the four tensor inputs, and executes the op
  eagerly; returns the single `variant` dataset tensor.
  """
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'optimize_dataset_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'optimize_dataset_v2' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if optimization_configs is None:
    optimization_configs = []
  if not isinstance(optimization_configs, (list, tuple)):
    raise TypeError(
        "Expected list for 'optimization_configs' argument to "
        "'optimize_dataset_v2' Op, not %r." % optimization_configs)
  optimization_configs = [_execute.make_str(_s, "optimization_configs") for _s in optimization_configs]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  optimizations_enabled = _ops.convert_to_tensor(optimizations_enabled, _dtypes.string)
  optimizations_disabled = _ops.convert_to_tensor(optimizations_disabled, _dtypes.string)
  optimizations_default = _ops.convert_to_tensor(optimizations_default, _dtypes.string)
  _inputs_flat = [input_dataset, optimizations_enabled, optimizations_disabled, optimizations_default]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes,
  "optimization_configs", optimization_configs)
  _result = _execute.execute(b"OptimizeDatasetV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OptimizeDatasetV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
def options_dataset(input_dataset: Annotated[Any, _atypes.Variant], serialized_options: str, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset by attaching tf.data.Options to `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
    serialized_options: A `string`.
      A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: dispatch straight to the C extension.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OptionsDataset", name, input_dataset, "serialized_options",
        serialized_options, "output_types", output_types, "output_shapes",
        output_shapes, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined the op; retry through the Python fallback.
    try:
      return options_dataset_eager_fallback(
          input_dataset, serialized_options=serialized_options,
          output_types=output_types, output_shapes=output_shapes,
          metadata=metadata, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  serialized_options = _execute.make_str(serialized_options, "serialized_options")
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'options_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'options_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OptionsDataset", input_dataset=input_dataset,
                          serialized_options=serialized_options,
                          output_types=output_types,
                          output_shapes=output_shapes, metadata=metadata,
                          name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("serialized_options", _op.get_attr("serialized_options"),
              "output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OptionsDataset", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result

OptionsDataset = tf_export("raw_ops.OptionsDataset")(_ops.to_raw_op(options_dataset))
def options_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], serialized_options: str, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager fallback for OptionsDataset.

  Validates attrs, converts the dataset input, and executes the op eagerly;
  returns the single `variant` dataset tensor.
  """
  serialized_options = _execute.make_str(serialized_options, "serialized_options")
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'options_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'options_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  _inputs_flat = [input_dataset]
  _attrs = ("serialized_options", serialized_options, "output_types",
  output_types, "output_shapes", output_shapes, "metadata", metadata)
  _result = _execute.execute(b"OptionsDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OptionsDataset", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("serialized_options", serialized_options, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"OptionsDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "OptionsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def padded_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], padded_shapes: Annotated[List[Any], _atypes.Int64], padding_values, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches and pads `batch_size` elements from the input. + + Args: + input_dataset: A `Tensor` of type `variant`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. + padded_shapes: A list of at least 1 `Tensor` objects with type `int64`. + A list of int64 tensors representing the desired padded shapes + of the corresponding output components. These shapes may be partially + specified, using `-1` to indicate that a particular dimension should be + padded to the maximum size of all batch elements. + padding_values: A list of `Tensor` objects. + A list of scalars containing the padding value to use for + each of the outputs. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "PaddedBatchDataset", name, input_dataset, batch_size, + padded_shapes, padding_values, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return padded_batch_dataset_eager_fallback( + input_dataset, batch_size, padded_shapes, padding_values, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(padded_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'padded_shapes' argument to " + "'padded_batch_dataset' Op, not %r." % padded_shapes) + _attr_N = len(padded_shapes) + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'padded_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "PaddedBatchDataset", input_dataset=input_dataset, + batch_size=batch_size, + padded_shapes=padded_shapes, + padding_values=padding_values, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Toutput_types", _op.get_attr("Toutput_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N"), + "metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "PaddedBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +PaddedBatchDataset = tf_export("raw_ops.PaddedBatchDataset")(_ops.to_raw_op(padded_batch_dataset)) + + +def padded_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], padded_shapes: Annotated[List[Any], _atypes.Int64], padding_values, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(padded_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'padded_shapes' argument to " + "'padded_batch_dataset' Op, not %r." % padded_shapes) + _attr_N = len(padded_shapes) + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'padded_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Toutput_types, padding_values = _execute.convert_to_mixed_eager_tensors(padding_values, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + padded_shapes = _ops.convert_n_to_tensor(padded_shapes, _dtypes.int64) + _inputs_flat = [input_dataset, batch_size] + list(padded_shapes) + list(padding_values) + _attrs = ("Toutput_types", _attr_Toutput_types, "output_shapes", + output_shapes, "N", _attr_N, "metadata", metadata) + _result = _execute.execute(b"PaddedBatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "PaddedBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def padded_batch_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], padded_shapes: Annotated[List[Any], _atypes.Int64], padding_values, drop_remainder: Annotated[Any, _atypes.Bool], output_shapes, parallel_copy:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches and pads `batch_size` elements from the input. + + Args: + input_dataset: A `Tensor` of type `variant`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. + padded_shapes: A list of at least 1 `Tensor` objects with type `int64`. + A list of int64 tensors representing the desired padded shapes + of the corresponding output components. These shapes may be partially + specified, using `-1` to indicate that a particular dimension should be + padded to the maximum size of all batch elements. + padding_values: A list of `Tensor` objects. 
+ A list of scalars containing the padding value to use for + each of the outputs. + drop_remainder: A `Tensor` of type `bool`. + A scalar representing whether the last batch should be dropped in case its size + is smaller than desired. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + parallel_copy: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "PaddedBatchDatasetV2", name, input_dataset, batch_size, + padded_shapes, padding_values, drop_remainder, "parallel_copy", + parallel_copy, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return padded_batch_dataset_v2_eager_fallback( + input_dataset, batch_size, padded_shapes, padding_values, + drop_remainder, parallel_copy=parallel_copy, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(padded_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'padded_shapes' argument to " + "'padded_batch_dataset_v2' Op, not %r." % padded_shapes) + _attr_N = len(padded_shapes) + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'padded_batch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "PaddedBatchDatasetV2", input_dataset=input_dataset, + batch_size=batch_size, + padded_shapes=padded_shapes, + padding_values=padding_values, + drop_remainder=drop_remainder, + output_shapes=output_shapes, + parallel_copy=parallel_copy, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("parallel_copy", _op._get_attr_bool("parallel_copy"), + "Toutput_types", _op.get_attr("Toutput_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N"), + "metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "PaddedBatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +PaddedBatchDatasetV2 = tf_export("raw_ops.PaddedBatchDatasetV2")(_ops.to_raw_op(padded_batch_dataset_v2)) + + +def padded_batch_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], padded_shapes: Annotated[List[Any], _atypes.Int64], padding_values, drop_remainder: Annotated[Any, _atypes.Bool], output_shapes, parallel_copy: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(padded_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'padded_shapes' argument to " + "'padded_batch_dataset_v2' Op, not %r." % padded_shapes) + _attr_N = len(padded_shapes) + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'padded_batch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Toutput_types, padding_values = _execute.convert_to_mixed_eager_tensors(padding_values, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + padded_shapes = _ops.convert_n_to_tensor(padded_shapes, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset, batch_size] + list(padded_shapes) + list(padding_values) + [drop_remainder] + _attrs = ("parallel_copy", parallel_copy, "Toutput_types", + _attr_Toutput_types, "output_shapes", output_shapes, "N", _attr_N, + "metadata", metadata) + _result = _execute.execute(b"PaddedBatchDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "PaddedBatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parallel_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, parallel_copy:bool=False, deterministic:str="default", metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + batch_size: A `Tensor` of type `int64`. + num_parallel_calls: A `Tensor` of type `int64`. + drop_remainder: A `Tensor` of type `bool`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. 
+ parallel_copy: An optional `bool`. Defaults to `False`. + deterministic: An optional `string`. Defaults to `"default"`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParallelBatchDataset", name, input_dataset, batch_size, + num_parallel_calls, drop_remainder, "parallel_copy", parallel_copy, + "output_types", output_types, "output_shapes", output_shapes, + "deterministic", deterministic, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parallel_batch_dataset_eager_fallback( + input_dataset, batch_size, num_parallel_calls, drop_remainder, + parallel_copy=parallel_copy, output_types=output_types, + output_shapes=output_shapes, deterministic=deterministic, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParallelBatchDataset", input_dataset=input_dataset, + batch_size=batch_size, + num_parallel_calls=num_parallel_calls, + drop_remainder=drop_remainder, + output_types=output_types, + output_shapes=output_shapes, + parallel_copy=parallel_copy, + deterministic=deterministic, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("parallel_copy", _op._get_attr_bool("parallel_copy"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "deterministic", + _op.get_attr("deterministic"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParallelBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParallelBatchDataset = tf_export("raw_ops.ParallelBatchDataset")(_ops.to_raw_op(parallel_batch_dataset)) + + +def parallel_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, parallel_copy: bool, deterministic: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_batch_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_batch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if parallel_copy is None: + parallel_copy = False + parallel_copy = _execute.make_bool(parallel_copy, "parallel_copy") + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset, batch_size, num_parallel_calls, drop_remainder] + _attrs = ("parallel_copy", parallel_copy, "output_types", output_types, + "output_shapes", output_shapes, "deterministic", deterministic, "metadata", + metadata) + _result = _execute.execute(b"ParallelBatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParallelBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parallel_filter_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int64], predicate, output_types, output_shapes, deterministic:str="default", metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset containing elements of `input_dataset` matching `predicate`. 
+ + The `predicate` function must return a scalar boolean and accept the + following arguments: + + * One tensor for each component of an element of `input_dataset`. + * One tensor for each value in `other_arguments`. + + Unlike a "FilterDataset", which applies `predicate` sequentially, this dataset + invokes up to `num_parallel_calls` copies of `predicate` in parallel. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `predicate`. + num_parallel_calls: A `Tensor` of type `int64`. + The number of concurrent invocations of `predicate` that process + elements from `input_dataset` in parallel. + predicate: A function decorated with @Defun. + A function returning a scalar boolean. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + deterministic: An optional `string`. Defaults to `"default"`. + A string indicating the op-level determinism to use. Deterministic controls + whether the interleave is allowed to return elements out of order if the next + element to be returned isn't available, but a later element is. Options are + "true", "false", and "default". "default" indicates that determinism should be + decided by the `experimental_deterministic` parameter of `tf.data.Options`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParallelFilterDataset", name, input_dataset, other_arguments, + num_parallel_calls, "predicate", predicate, "deterministic", + deterministic, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parallel_filter_dataset_eager_fallback( + input_dataset, other_arguments, num_parallel_calls, + predicate=predicate, deterministic=deterministic, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_filter_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_filter_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParallelFilterDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + num_parallel_calls=num_parallel_calls, + predicate=predicate, + output_types=output_types, + output_shapes=output_shapes, + deterministic=deterministic, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("predicate", _op.get_attr("predicate"), "deterministic", + _op.get_attr("deterministic"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParallelFilterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParallelFilterDataset = tf_export("raw_ops.ParallelFilterDataset")(_ops.to_raw_op(parallel_filter_dataset)) + + +def parallel_filter_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int64], predicate, output_types, output_shapes, deterministic: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_filter_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_filter_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + _inputs_flat = [input_dataset] + list(other_arguments) + [num_parallel_calls] + _attrs = ("predicate", predicate, "deterministic", deterministic, + "Targuments", _attr_Targuments, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"ParallelFilterDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParallelFilterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parallel_interleave_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, sloppy:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + The resulting dataset is similar to the `InterleaveDataset`, except that the + dataset will fetch records from the interleaved datasets in parallel. + + The `tf.data` Python API creates instances of this op from + `Dataset.interleave()` when the `num_parallel_calls` parameter of that method + is set to any value other than `None`. 
+ + By default, the output of this dataset will be deterministic, which may result + in the dataset blocking if the next data item to be returned isn't available. + In order to avoid head-of-line blocking, one can set the + `experimental_deterministic` parameter of `tf.data.Options` to `False`, + which can improve performance at the expense of non-determinism. + + Args: + input_dataset: A `Tensor` of type `variant`. + Dataset that produces a stream of arguments for the function `f`. + other_arguments: A list of `Tensor` objects. + Additional arguments to pass to `f` beyond those produced by `input_dataset`. + Evaluated once when the dataset is instantiated. + cycle_length: A `Tensor` of type `int64`. + Number of datasets (each created by applying `f` to the elements of + `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a + round-robin fashion. + block_length: A `Tensor` of type `int64`. + Number of elements at a time to produce from each interleaved invocation of a + dataset returned by `f`. + num_parallel_calls: A `Tensor` of type `int64`. + Determines the number of threads that should be used for fetching data from + input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE` + constant can be used to indicate that the level of parallelism should be autotuned. + f: A function decorated with @Defun. + A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + sloppy: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParallelInterleaveDatasetV2", name, input_dataset, + other_arguments, cycle_length, block_length, num_parallel_calls, "f", + f, "output_types", output_types, "output_shapes", output_shapes, + "sloppy", sloppy, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parallel_interleave_dataset_v2_eager_fallback( + input_dataset, other_arguments, cycle_length, block_length, + num_parallel_calls, f=f, output_types=output_types, + output_shapes=output_shapes, sloppy=sloppy, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_interleave_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_interleave_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if sloppy is None: + sloppy = False + sloppy = _execute.make_bool(sloppy, "sloppy") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParallelInterleaveDatasetV2", input_dataset=input_dataset, + other_arguments=other_arguments, + cycle_length=cycle_length, + block_length=block_length, + num_parallel_calls=num_parallel_calls, + f=f, output_types=output_types, + output_shapes=output_shapes, + sloppy=sloppy, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "sloppy", + _op._get_attr_bool("sloppy"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParallelInterleaveDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParallelInterleaveDatasetV2 = tf_export("raw_ops.ParallelInterleaveDatasetV2")(_ops.to_raw_op(parallel_interleave_dataset_v2)) + + +def parallel_interleave_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, sloppy: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_interleave_dataset_v2' Op, not %r." 
% output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_interleave_dataset_v2' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if sloppy is None:
    sloppy = False
  sloppy = _execute.make_bool(sloppy, "sloppy")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx)
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64)
  block_length = _ops.convert_to_tensor(block_length, _dtypes.int64)
  num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64)
  _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, num_parallel_calls]
  _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types",
  output_types, "output_shapes", output_shapes, "sloppy", sloppy, "metadata",
  metadata)
  _result = _execute.execute(b"ParallelInterleaveDatasetV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelInterleaveDatasetV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def parallel_interleave_dataset_v3(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, deterministic:str="default", metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that applies `f` to the outputs of `input_dataset`.

  The resulting dataset is similar to the `InterleaveDataset`, except that the
  dataset will fetch records from the interleaved datasets in parallel.

  The `tf.data` Python API creates instances of this op from
  `Dataset.interleave()` when the `num_parallel_calls` parameter of that method
  is set to any value other than `None`.

  By default, the output of this dataset will be deterministic, which may result
  in the dataset blocking if the next data item to be returned isn't available.
  In order to avoid head-of-line blocking, one can either set the `deterministic`
  attribute to "false", or leave it as "default" and set the
  `experimental_deterministic` parameter of `tf.data.Options` to `False`.
  This can improve performance at the expense of non-determinism.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      Dataset that produces a stream of arguments for the function `f`.
    other_arguments: A list of `Tensor` objects.
      Additional arguments to pass to `f` beyond those produced by `input_dataset`.
      Evaluated once when the dataset is instantiated.
    cycle_length: A `Tensor` of type `int64`.
      Number of datasets (each created by applying `f` to the elements of
      `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a
      round-robin fashion.
    block_length: A `Tensor` of type `int64`.
      Number of elements at a time to produce from each interleaved invocation of a
      dataset returned by `f`.
    num_parallel_calls: A `Tensor` of type `int64`.
      Determines the number of threads that should be used for fetching data from
      input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`
      constant can be used to indicate that the level of parallelism should be autotuned.
    f: A function decorated with @Defun.
      A function mapping elements of `input_dataset`, concatenated with
      `other_arguments`, to a Dataset variant that contains elements matching
      `output_types` and `output_shapes`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    deterministic: An optional `string`. Defaults to `"default"`.
      A string indicating the op-level determinism to use. Deterministic controls
      whether the interleave is allowed to return elements out of order if the next
      element to be returned isn't available, but a later element is. Options are
      "true", "false", and "default". "default" indicates that determinism should be
      decided by the `experimental_deterministic` parameter of `tf.data.Options`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ParallelInterleaveDatasetV3", name, input_dataset,
        other_arguments, cycle_length, block_length, num_parallel_calls, "f",
        f, "deterministic", deterministic, "output_types", output_types,
        "output_shapes", output_shapes, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return parallel_interleave_dataset_v3_eager_fallback(
          input_dataset, other_arguments, cycle_length, block_length,
          num_parallel_calls, f=f, deterministic=deterministic,
          output_types=output_types, output_shapes=output_shapes,
          metadata=metadata, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_interleave_dataset_v3' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_interleave_dataset_v3' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ParallelInterleaveDatasetV3", input_dataset=input_dataset,
                                       other_arguments=other_arguments,
                                       cycle_length=cycle_length,
                                       block_length=block_length,
                                       num_parallel_calls=num_parallel_calls,
                                       f=f, output_types=output_types,
                                       output_shapes=output_shapes,
                                       deterministic=deterministic,
                                       metadata=metadata, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("f", _op.get_attr("f"), "deterministic",
              _op.get_attr("deterministic"), "Targuments",
              _op.get_attr("Targuments"), "output_types",
              _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ParallelInterleaveDatasetV3", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

ParallelInterleaveDatasetV3 = tf_export("raw_ops.ParallelInterleaveDatasetV3")(_ops.to_raw_op(parallel_interleave_dataset_v3))


def parallel_interleave_dataset_v3_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, deterministic: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager-execution fallback for ParallelInterleaveDatasetV3: converts
  inputs to tensors, normalizes attrs, and executes the op directly."""
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_interleave_dataset_v3' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_interleave_dataset_v3' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx)
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64)
  block_length = _ops.convert_to_tensor(block_length, _dtypes.int64)
  num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64)
  _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, num_parallel_calls]
  _attrs = ("f", f, "deterministic", deterministic, "Targuments",
  _attr_Targuments, "output_types", output_types, "output_shapes",
  output_shapes, "metadata", metadata)
  _result = _execute.execute(b"ParallelInterleaveDatasetV3", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelInterleaveDatasetV3", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def parallel_interleave_dataset_v4(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, deterministic:str="default", metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that applies `f` to the outputs of `input_dataset`.

  The resulting dataset is similar to the `InterleaveDataset`, except that the
  dataset will fetch records from the interleaved datasets in parallel.

  The `tf.data` Python API creates instances of this op from
  `Dataset.interleave()` when the `num_parallel_calls` parameter of that method
  is set to any value other than `None`.

  By default, the output of this dataset will be deterministic, which may result
  in the dataset blocking if the next data item to be returned isn't available.
  In order to avoid head-of-line blocking, one can either set the `deterministic`
  attribute to "false", or leave it as "default" and set the
  `experimental_deterministic` parameter of `tf.data.Options` to `False`.
  This can improve performance at the expense of non-determinism.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      Dataset that produces a stream of arguments for the function `f`.
    other_arguments: A list of `Tensor` objects.
      Additional arguments to pass to `f` beyond those produced by `input_dataset`.
      Evaluated once when the dataset is instantiated.
    cycle_length: A `Tensor` of type `int64`.
      Number of datasets (each created by applying `f` to the elements of
      `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a
      round-robin fashion.
    block_length: A `Tensor` of type `int64`.
      Number of elements at a time to produce from each interleaved invocation of a
      dataset returned by `f`.
    buffer_output_elements: A `Tensor` of type `int64`.
      The number of elements each iterator being interleaved should buffer (similar
      to the `.prefetch()` transformation for each interleaved iterator).
    prefetch_input_elements: A `Tensor` of type `int64`.
      Determines the number of iterators to prefetch, allowing buffers to warm up and
      data to be pre-fetched without blocking the main thread.
    num_parallel_calls: A `Tensor` of type `int64`.
      Determines the number of threads that should be used for fetching data from
      input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`
      constant can be used to indicate that the level of parallelism should be autotuned.
    f: A function decorated with @Defun.
      A function mapping elements of `input_dataset`, concatenated with
      `other_arguments`, to a Dataset variant that contains elements matching
      `output_types` and `output_shapes`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    deterministic: An optional `string`. Defaults to `"default"`.
      A string indicating the op-level determinism to use. Deterministic controls
      whether the interleave is allowed to return elements out of order if the next
      element to be returned isn't available, but a later element is. Options are
      "true", "false", and "default". "default" indicates that determinism should be
      decided by the `experimental_deterministic` parameter of `tf.data.Options`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ParallelInterleaveDatasetV4", name, input_dataset,
        other_arguments, cycle_length, block_length, buffer_output_elements,
        prefetch_input_elements, num_parallel_calls, "f", f, "deterministic",
        deterministic, "output_types", output_types, "output_shapes",
        output_shapes, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return parallel_interleave_dataset_v4_eager_fallback(
          input_dataset, other_arguments, cycle_length, block_length,
          buffer_output_elements, prefetch_input_elements, num_parallel_calls,
          f=f, deterministic=deterministic, output_types=output_types,
          output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_interleave_dataset_v4' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_interleave_dataset_v4' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ParallelInterleaveDatasetV4", input_dataset=input_dataset,
                                       other_arguments=other_arguments,
                                       cycle_length=cycle_length,
                                       block_length=block_length,
                                       buffer_output_elements=buffer_output_elements,
                                       prefetch_input_elements=prefetch_input_elements,
                                       num_parallel_calls=num_parallel_calls,
                                       f=f, output_types=output_types,
                                       output_shapes=output_shapes,
                                       deterministic=deterministic,
                                       metadata=metadata, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("f", _op.get_attr("f"), "deterministic",
              _op.get_attr("deterministic"), "Targuments",
              _op.get_attr("Targuments"), "output_types",
              _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ParallelInterleaveDatasetV4", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

ParallelInterleaveDatasetV4 = tf_export("raw_ops.ParallelInterleaveDatasetV4")(_ops.to_raw_op(parallel_interleave_dataset_v4))


def parallel_interleave_dataset_v4_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, deterministic: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager-execution fallback for ParallelInterleaveDatasetV4: converts
  inputs to tensors, normalizes attrs, and executes the op directly."""
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_interleave_dataset_v4' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_interleave_dataset_v4' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx)
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64)
  block_length = _ops.convert_to_tensor(block_length, _dtypes.int64)
  buffer_output_elements = _ops.convert_to_tensor(buffer_output_elements, _dtypes.int64)
  prefetch_input_elements = _ops.convert_to_tensor(prefetch_input_elements, _dtypes.int64)
  num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64)
  _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, buffer_output_elements, prefetch_input_elements, num_parallel_calls]
  _attrs = ("f", f, "deterministic", deterministic, "Targuments",
  _attr_Targuments, "output_types", output_types, "output_shapes",
  output_shapes, "metadata", metadata)
  _result = _execute.execute(b"ParallelInterleaveDatasetV4", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelInterleaveDatasetV4", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def parallel_map_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int32], f, output_types, output_shapes, use_inter_op_parallelism:bool=True, sloppy:bool=False, preserve_cardinality:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that applies `f` to the outputs of `input_dataset`.

  Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
  to `num_parallel_calls` copies of `f` in parallel.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    other_arguments: A list of `Tensor` objects.
    num_parallel_calls: A `Tensor` of type `int32`.
      The number of concurrent invocations of `f` that process
      elements from `input_dataset` in parallel.
    f: A function decorated with @Defun.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    use_inter_op_parallelism: An optional `bool`. Defaults to `True`.
    sloppy: An optional `bool`. Defaults to `False`.
    preserve_cardinality: An optional `bool`. Defaults to `False`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ParallelMapDataset", name, input_dataset, other_arguments,
        num_parallel_calls, "f", f, "output_types", output_types,
        "output_shapes", output_shapes, "use_inter_op_parallelism",
        use_inter_op_parallelism, "sloppy", sloppy, "preserve_cardinality",
        preserve_cardinality, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return parallel_map_dataset_eager_fallback(
          input_dataset, other_arguments, num_parallel_calls, f=f,
          output_types=output_types, output_shapes=output_shapes,
          use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy,
          preserve_cardinality=preserve_cardinality, metadata=metadata,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_map_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_map_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if use_inter_op_parallelism is None:
    use_inter_op_parallelism = True
  use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism")
  if sloppy is None:
    sloppy = False
  sloppy = _execute.make_bool(sloppy, "sloppy")
  if preserve_cardinality is None:
    preserve_cardinality = False
  preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ParallelMapDataset", input_dataset=input_dataset,
                              other_arguments=other_arguments,
                              num_parallel_calls=num_parallel_calls, f=f,
                              output_types=output_types,
                              output_shapes=output_shapes,
                              use_inter_op_parallelism=use_inter_op_parallelism,
                              sloppy=sloppy,
                              preserve_cardinality=preserve_cardinality,
                              metadata=metadata, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("f", _op.get_attr("f"), "Targuments",
              _op.get_attr("Targuments"), "output_types",
              _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "use_inter_op_parallelism",
              _op._get_attr_bool("use_inter_op_parallelism"), "sloppy",
              _op._get_attr_bool("sloppy"), "preserve_cardinality",
              _op._get_attr_bool("preserve_cardinality"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ParallelMapDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

ParallelMapDataset = tf_export("raw_ops.ParallelMapDataset")(_ops.to_raw_op(parallel_map_dataset))


def parallel_map_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int32], f, output_types, output_shapes, use_inter_op_parallelism: bool, sloppy: bool, preserve_cardinality: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager-execution fallback for ParallelMapDataset: converts inputs to
  tensors, normalizes attrs, and executes the op directly."""
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_map_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_map_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if use_inter_op_parallelism is None:
    use_inter_op_parallelism = True
  use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism")
  if sloppy is None:
    sloppy = False
  sloppy = _execute.make_bool(sloppy, "sloppy")
  if preserve_cardinality is None:
    preserve_cardinality = False
  preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx)
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int32)
  _inputs_flat = [input_dataset] + list(other_arguments) + [num_parallel_calls]
  _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types",
  output_types, "output_shapes", output_shapes, "use_inter_op_parallelism",
  use_inter_op_parallelism, "sloppy", sloppy, "preserve_cardinality",
  preserve_cardinality, "metadata", metadata)
  _result = _execute.execute(b"ParallelMapDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelMapDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def parallel_map_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, use_inter_op_parallelism:bool=True, deterministic:str="default", preserve_cardinality:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that applies `f` to the outputs of `input_dataset`.

  Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
  to `num_parallel_calls` copies of `f` in parallel.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    other_arguments: A list of `Tensor` objects.
    num_parallel_calls: A `Tensor` of type `int64`.
      The number of concurrent invocations of `f` that process
      elements from `input_dataset` in parallel.
    f: A function decorated with @Defun.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    use_inter_op_parallelism: An optional `bool`. Defaults to `True`.
    deterministic: An optional `string`. Defaults to `"default"`.
    preserve_cardinality: An optional `bool`. Defaults to `False`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ParallelMapDatasetV2", name, input_dataset, other_arguments,
        num_parallel_calls, "f", f, "output_types", output_types,
        "output_shapes", output_shapes, "use_inter_op_parallelism",
        use_inter_op_parallelism, "deterministic", deterministic,
        "preserve_cardinality", preserve_cardinality, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return parallel_map_dataset_v2_eager_fallback(
          input_dataset, other_arguments, num_parallel_calls, f=f,
          output_types=output_types, output_shapes=output_shapes,
          use_inter_op_parallelism=use_inter_op_parallelism,
          deterministic=deterministic,
          preserve_cardinality=preserve_cardinality, metadata=metadata,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_map_dataset_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_map_dataset_v2' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if use_inter_op_parallelism is None:
    use_inter_op_parallelism = True
  use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism")
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if preserve_cardinality is None:
    preserve_cardinality = False
  preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ParallelMapDatasetV2", input_dataset=input_dataset,
                                other_arguments=other_arguments,
                                num_parallel_calls=num_parallel_calls, f=f,
                                output_types=output_types,
                                output_shapes=output_shapes,
                                use_inter_op_parallelism=use_inter_op_parallelism,
                                deterministic=deterministic,
                                preserve_cardinality=preserve_cardinality,
                                metadata=metadata, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("f", _op.get_attr("f"), "Targuments",
              _op.get_attr("Targuments"), "output_types",
              _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "use_inter_op_parallelism",
              _op._get_attr_bool("use_inter_op_parallelism"), "deterministic",
              _op.get_attr("deterministic"), "preserve_cardinality",
              _op._get_attr_bool("preserve_cardinality"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ParallelMapDatasetV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

ParallelMapDatasetV2 = tf_export("raw_ops.ParallelMapDatasetV2")(_ops.to_raw_op(parallel_map_dataset_v2))


def parallel_map_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, num_parallel_calls: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, use_inter_op_parallelism: bool, deterministic: str, preserve_cardinality: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager-execution fallback for ParallelMapDatasetV2: converts inputs to
  tensors, normalizes attrs, and executes the op directly."""
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'parallel_map_dataset_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'parallel_map_dataset_v2' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if use_inter_op_parallelism is None:
    use_inter_op_parallelism = True
  use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism")
  if deterministic is None:
    deterministic = "default"
  deterministic = _execute.make_str(deterministic, "deterministic")
  if preserve_cardinality is None:
    preserve_cardinality = False
  preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx)
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64)
  _inputs_flat = [input_dataset] + list(other_arguments) + [num_parallel_calls]
  _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types",
  output_types, "output_shapes", output_shapes, "use_inter_op_parallelism",
  use_inter_op_parallelism, "deterministic", deterministic,
  "preserve_cardinality", preserve_cardinality, "metadata", metadata)
  _result = _execute.execute(b"ParallelMapDatasetV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelMapDatasetV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def prefetch_dataset(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], output_types, output_shapes, slack_period:int=0, legacy_autotune:bool=True, buffer_size_min:int=0, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that asynchronously prefetches elements from `input_dataset`.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    buffer_size: A `Tensor` of type `int64`.
      The maximum number of elements to buffer in an iterator over
      this dataset.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    slack_period: An optional `int`. Defaults to `0`.
    legacy_autotune: An optional `bool`. Defaults to `True`.
    buffer_size_min: An optional `int`. Defaults to `0`.
    metadata: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "PrefetchDataset", name, input_dataset, buffer_size,
        "output_types", output_types, "output_shapes", output_shapes,
        "slack_period", slack_period, "legacy_autotune", legacy_autotune,
        "buffer_size_min", buffer_size_min, "metadata", metadata)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return prefetch_dataset_eager_fallback(
          input_dataset, buffer_size, output_types=output_types,
          output_shapes=output_shapes, slack_period=slack_period,
          legacy_autotune=legacy_autotune, buffer_size_min=buffer_size_min,
          metadata=metadata, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'prefetch_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'prefetch_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if slack_period is None:
    slack_period = 0
  slack_period = _execute.make_int(slack_period, "slack_period")
  if legacy_autotune is None:
    legacy_autotune = True
  legacy_autotune = _execute.make_bool(legacy_autotune, "legacy_autotune")
  if buffer_size_min is None:
    buffer_size_min = 0
  buffer_size_min = _execute.make_int(buffer_size_min, "buffer_size_min")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "PrefetchDataset", input_dataset=input_dataset,
                           buffer_size=buffer_size, output_types=output_types,
                           output_shapes=output_shapes,
                           slack_period=slack_period,
                           legacy_autotune=legacy_autotune,
                           buffer_size_min=buffer_size_min, metadata=metadata,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "slack_period",
              _op._get_attr_int("slack_period"), "legacy_autotune",
              _op._get_attr_bool("legacy_autotune"), "buffer_size_min",
              _op._get_attr_int("buffer_size_min"), "metadata",
              _op.get_attr("metadata"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "PrefetchDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

PrefetchDataset = tf_export("raw_ops.PrefetchDataset")(_ops.to_raw_op(prefetch_dataset))


def prefetch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], output_types, output_shapes, slack_period: int, legacy_autotune: bool, buffer_size_min: int, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]:
  """Eager-execution fallback for PrefetchDataset: converts inputs to
  tensors, normalizes attrs, and executes the op directly."""
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'prefetch_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'prefetch_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if slack_period is None:
    slack_period = 0
  slack_period = _execute.make_int(slack_period, "slack_period")
  if legacy_autotune is None:
    legacy_autotune = True
  legacy_autotune = _execute.make_bool(legacy_autotune, "legacy_autotune")
  if buffer_size_min is None:
    buffer_size_min = 0
  buffer_size_min = _execute.make_int(buffer_size_min, "buffer_size_min")
  if metadata is None:
    metadata = ""
  metadata = _execute.make_str(metadata, "metadata")
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64)
  _inputs_flat = [input_dataset, buffer_size]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes,
  "slack_period", slack_period, "legacy_autotune", legacy_autotune,
  "buffer_size_min", buffer_size_min, "metadata", metadata)
  _result = _execute.execute(b"PrefetchDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "PrefetchDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def range_dataset(start: Annotated[Any, _atypes.Int64], stop: Annotated[Any, _atypes.Int64], step: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata:str="", replicate_on_split:bool=False, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset with a range of values. Corresponds to python's xrange.

  Args:
    start: A `Tensor` of type `int64`.
      corresponds to start in python's xrange().
    stop: A `Tensor` of type `int64`.
      corresponds to stop in python's xrange().
    step: A `Tensor` of type `int64`.
      corresponds to step in python's xrange().
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    metadata: An optional `string`. Defaults to `""`.
    replicate_on_split: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first, then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RangeDataset", name, start, stop, step, "output_types",
        output_types, "output_shapes", output_shapes, "metadata", metadata,
        "replicate_on_split", replicate_on_split)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return range_dataset_eager_fallback(
          start, stop, step, output_types=output_types,
          output_shapes=output_shapes, metadata=metadata,
          replicate_on_split=replicate_on_split, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'range_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'range_dataset' Op, not %r."
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + if replicate_on_split is None: + replicate_on_split = False + replicate_on_split = _execute.make_bool(replicate_on_split, "replicate_on_split") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RangeDataset", start=start, stop=stop, step=step, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + replicate_on_split=replicate_on_split, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata"), "replicate_on_split", + _op._get_attr_bool("replicate_on_split")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RangeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RangeDataset = tf_export("raw_ops.RangeDataset")(_ops.to_raw_op(range_dataset)) + + +def range_dataset_eager_fallback(start: Annotated[Any, _atypes.Int64], stop: Annotated[Any, _atypes.Int64], step: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, replicate_on_split: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'range_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'range_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + if replicate_on_split is None: + replicate_on_split = False + replicate_on_split = _execute.make_bool(replicate_on_split, "replicate_on_split") + start = _ops.convert_to_tensor(start, _dtypes.int64) + stop = _ops.convert_to_tensor(stop, _dtypes.int64) + step = _ops.convert_to_tensor(step, _dtypes.int64) + _inputs_flat = [start, stop, step] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata, "replicate_on_split", replicate_on_split) + _result = _execute.execute(b"RangeDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RangeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def reduce_dataset(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, use_inter_op_parallelism:bool=True, metadata:str="", name=None): + r"""Reduces the input dataset to a singleton using a reduce function. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + initial_state: A list of `Tensor` objects. + A nested structure of tensors, representing the initial state of the + transformation. + other_arguments: A list of `Tensor` objects. + f: A function decorated with @Defun. + A function that maps `(old_state, input_element)` to `new_state`. It must take + two arguments and return a nested structures of tensors. The structure of + `new_state` must match the structure of `initial_state`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + use_inter_op_parallelism: An optional `bool`. Defaults to `True`. 
+ metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ReduceDataset", name, input_dataset, initial_state, + other_arguments, "f", f, "output_types", output_types, + "output_shapes", output_shapes, "use_inter_op_parallelism", + use_inter_op_parallelism, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return reduce_dataset_eager_fallback( + input_dataset, initial_state, other_arguments, f=f, + output_types=output_types, output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'reduce_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'reduce_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ReduceDataset", input_dataset=input_dataset, + initial_state=initial_state, + other_arguments=other_arguments, f=f, + output_types=output_types, + output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + metadata=metadata, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Tstate", _op.get_attr("Tstate"), + "Targuments", _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "use_inter_op_parallelism", + _op._get_attr_bool("use_inter_op_parallelism"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ReduceDataset", _inputs_flat, _attrs, _result) + return _result + +ReduceDataset = tf_export("raw_ops.ReduceDataset")(_ops.to_raw_op(reduce_dataset)) + + +def reduce_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, use_inter_op_parallelism: bool, metadata: str, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'reduce_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'reduce_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Tstate, initial_state = _execute.convert_to_mixed_eager_tensors(initial_state, ctx) + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(initial_state) + list(other_arguments) + _attrs = ("f", f, "Tstate", _attr_Tstate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes, + "use_inter_op_parallelism", use_inter_op_parallelism, "metadata", metadata) + _result = _execute.execute(b"ReduceDataset", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ReduceDataset", _inputs_flat, _attrs, _result) + return _result + + +def repeat_dataset(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the outputs of `input_dataset` `count` times. + + Args: + input_dataset: A `Tensor` of type `variant`. + count: A `Tensor` of type `int64`. + A scalar representing the number of times that `input_dataset` should + be repeated. A value of `-1` indicates that it should be repeated infinitely. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). 
+ + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RepeatDataset", name, input_dataset, count, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return repeat_dataset_eager_fallback( + input_dataset, count, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'repeat_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'repeat_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RepeatDataset", input_dataset=input_dataset, count=count, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RepeatDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RepeatDataset = tf_export("raw_ops.RepeatDataset")(_ops.to_raw_op(repeat_dataset)) + + +def repeat_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'repeat_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'repeat_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + count = _ops.convert_to_tensor(count, _dtypes.int64) + _inputs_flat = [input_dataset, count] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"RepeatDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RepeatDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def rewrite_dataset(input_dataset: Annotated[Any, _atypes.Variant], rewrite_name: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + rewrite_name: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RewriteDataset", name, input_dataset, rewrite_name, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rewrite_dataset_eager_fallback( + input_dataset, rewrite_name, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rewrite_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rewrite_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RewriteDataset", input_dataset=input_dataset, + rewrite_name=rewrite_name, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RewriteDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RewriteDataset = tf_export("raw_ops.RewriteDataset")(_ops.to_raw_op(rewrite_dataset)) + + +def rewrite_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], rewrite_name: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rewrite_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rewrite_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + rewrite_name = _ops.convert_to_tensor(rewrite_name, _dtypes.string) + _inputs_flat = [input_dataset, rewrite_name] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"RewriteDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RewriteDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def serialize_iterator(resource_handle: Annotated[Any, _atypes.Resource], external_state_policy:int=0, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Converts the given `resource_handle` representing an iterator to a variant tensor. + + Args: + resource_handle: A `Tensor` of type `resource`. + A handle to an iterator resource. + external_state_policy: An optional `int`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SerializeIterator", name, resource_handle, + "external_state_policy", external_state_policy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return serialize_iterator_eager_fallback( + resource_handle, external_state_policy=external_state_policy, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if external_state_policy is None: + external_state_policy = 0 + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SerializeIterator", resource_handle=resource_handle, + external_state_policy=external_state_policy, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("external_state_policy", + _op._get_attr_int("external_state_policy")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SerializeIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SerializeIterator = tf_export("raw_ops.SerializeIterator")(_ops.to_raw_op(serialize_iterator)) + + +def serialize_iterator_eager_fallback(resource_handle: Annotated[Any, _atypes.Resource], external_state_policy: int, name, ctx) -> Annotated[Any, _atypes.Variant]: + if external_state_policy is None: + external_state_policy = 0 + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + resource_handle = _ops.convert_to_tensor(resource_handle, _dtypes.resource) + _inputs_flat = [resource_handle] + _attrs = ("external_state_policy", external_state_policy) + _result = _execute.execute(b"SerializeIterator", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SerializeIterator", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shard_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_shards: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, require_non_empty:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_shards: A `Tensor` of type `int64`. 
+ An integer representing the number of shards operating in parallel. + index: A `Tensor` of type `int64`. + An integer representing the current worker index. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + require_non_empty: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShardDataset", name, input_dataset, num_shards, index, + "require_non_empty", require_non_empty, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shard_dataset_eager_fallback( + input_dataset, num_shards, index, + require_non_empty=require_non_empty, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shard_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shard_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if require_non_empty is None: + require_non_empty = False + require_non_empty = _execute.make_bool(require_non_empty, "require_non_empty") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShardDataset", input_dataset=input_dataset, num_shards=num_shards, + index=index, output_types=output_types, + output_shapes=output_shapes, + require_non_empty=require_non_empty, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("require_non_empty", _op._get_attr_bool("require_non_empty"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShardDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShardDataset = tf_export("raw_ops.ShardDataset")(_ops.to_raw_op(shard_dataset)) + + +def shard_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_shards: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, require_non_empty: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shard_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shard_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if require_non_empty is None: + require_non_empty = False + require_non_empty = _execute.make_bool(require_non_empty, "require_non_empty") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_shards = _ops.convert_to_tensor(num_shards, _dtypes.int64) + index = _ops.convert_to_tensor(index, _dtypes.int64) + _inputs_flat = [input_dataset, num_shards, index] + _attrs = ("require_non_empty", require_non_empty, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"ShardDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShardDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shuffle_and_repeat_dataset(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, reshuffle_each_iteration:bool=True, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that shuffles and repeats elements from `input_dataset` + + pseudorandomly. + + Args: + input_dataset: A `Tensor` of type `variant`. + buffer_size: A `Tensor` of type `int64`. + The number of output elements to buffer in an iterator over + this dataset. Compare with the `min_after_dequeue` attr when creating a + `RandomShuffleQueue`. + seed: A `Tensor` of type `int64`. + A scalar seed for the random number generator. If either `seed` or + `seed2` is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used. + seed2: A `Tensor` of type `int64`. 
+ A second scalar seed to avoid seed collision. + count: A `Tensor` of type `int64`. + A scalar representing the number of times the underlying dataset + should be repeated. The default is `-1`, which results in infinite repetition. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reshuffle_each_iteration: An optional `bool`. Defaults to `True`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShuffleAndRepeatDataset", name, input_dataset, buffer_size, + seed, seed2, count, "output_types", output_types, "output_shapes", + output_shapes, "reshuffle_each_iteration", reshuffle_each_iteration, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shuffle_and_repeat_dataset_eager_fallback( + input_dataset, buffer_size, seed, seed2, count, + output_types=output_types, output_shapes=output_shapes, + reshuffle_each_iteration=reshuffle_each_iteration, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_and_repeat_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_and_repeat_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShuffleAndRepeatDataset", input_dataset=input_dataset, + buffer_size=buffer_size, seed=seed, + seed2=seed2, count=count, + output_types=output_types, + output_shapes=output_shapes, + reshuffle_each_iteration=reshuffle_each_iteration, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "reshuffle_each_iteration", + _op._get_attr_bool("reshuffle_each_iteration"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShuffleAndRepeatDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShuffleAndRepeatDataset = tf_export("raw_ops.ShuffleAndRepeatDataset")(_ops.to_raw_op(shuffle_and_repeat_dataset)) + + +def shuffle_and_repeat_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, reshuffle_each_iteration: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_and_repeat_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_and_repeat_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + count = _ops.convert_to_tensor(count, _dtypes.int64) + _inputs_flat = [input_dataset, buffer_size, seed, seed2, count] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "reshuffle_each_iteration", reshuffle_each_iteration, "metadata", metadata) + _result = _execute.execute(b"ShuffleAndRepeatDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShuffleAndRepeatDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shuffle_and_repeat_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], count: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, reshuffle_each_iteration:bool=True, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + buffer_size: A `Tensor` of type `int64`. 
+ seed: A `Tensor` of type `int64`. + seed2: A `Tensor` of type `int64`. + count: A `Tensor` of type `int64`. + seed_generator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reshuffle_each_iteration: An optional `bool`. Defaults to `True`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShuffleAndRepeatDatasetV2", name, input_dataset, buffer_size, + seed, seed2, count, seed_generator, "reshuffle_each_iteration", + reshuffle_each_iteration, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shuffle_and_repeat_dataset_v2_eager_fallback( + input_dataset, buffer_size, seed, seed2, count, seed_generator, + reshuffle_each_iteration=reshuffle_each_iteration, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_and_repeat_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_and_repeat_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShuffleAndRepeatDatasetV2", input_dataset=input_dataset, + buffer_size=buffer_size, seed=seed, + seed2=seed2, count=count, + seed_generator=seed_generator, + output_types=output_types, + output_shapes=output_shapes, + reshuffle_each_iteration=reshuffle_each_iteration, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("reshuffle_each_iteration", + _op._get_attr_bool("reshuffle_each_iteration"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShuffleAndRepeatDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShuffleAndRepeatDatasetV2 = tf_export("raw_ops.ShuffleAndRepeatDatasetV2")(_ops.to_raw_op(shuffle_and_repeat_dataset_v2)) + + +def shuffle_and_repeat_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], count: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, reshuffle_each_iteration: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_and_repeat_dataset_v2' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_and_repeat_dataset_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + count = _ops.convert_to_tensor(count, _dtypes.int64) + seed_generator = _ops.convert_to_tensor(seed_generator, _dtypes.resource) + _inputs_flat = [input_dataset, buffer_size, seed, seed2, count, seed_generator] + _attrs = ("reshuffle_each_iteration", reshuffle_each_iteration, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"ShuffleAndRepeatDatasetV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShuffleAndRepeatDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shuffle_dataset(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, reshuffle_each_iteration:bool=True, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that shuffles elements from `input_dataset` pseudorandomly. + + Args: + input_dataset: A `Tensor` of type `variant`. 
+ buffer_size: A `Tensor` of type `int64`. + The number of output elements to buffer in an iterator over + this dataset. Compare with the `min_after_dequeue` attr when creating a + `RandomShuffleQueue`. + seed: A `Tensor` of type `int64`. + A scalar seed for the random number generator. If either `seed` or + `seed2` is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used. + seed2: A `Tensor` of type `int64`. + A second scalar seed to avoid seed collision. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reshuffle_each_iteration: An optional `bool`. Defaults to `True`. + If true, each iterator over this dataset will be given + a different pseudorandomly generated seed, based on a sequence seeded by the + `seed` and `seed2` inputs. If false, each iterator will be given the same + seed, and repeated iteration over this dataset will yield the exact same + sequence of results. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShuffleDataset", name, input_dataset, buffer_size, seed, seed2, + "reshuffle_each_iteration", reshuffle_each_iteration, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shuffle_dataset_eager_fallback( + input_dataset, buffer_size, seed, seed2, + reshuffle_each_iteration=reshuffle_each_iteration, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShuffleDataset", input_dataset=input_dataset, + buffer_size=buffer_size, seed=seed, seed2=seed2, + output_types=output_types, + output_shapes=output_shapes, + reshuffle_each_iteration=reshuffle_each_iteration, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("reshuffle_each_iteration", + _op._get_attr_bool("reshuffle_each_iteration"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShuffleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShuffleDataset = tf_export("raw_ops.ShuffleDataset")(_ops.to_raw_op(shuffle_dataset)) + + +def shuffle_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, reshuffle_each_iteration: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + _inputs_flat = [input_dataset, buffer_size, seed, seed2] + _attrs = ("reshuffle_each_iteration", reshuffle_each_iteration, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"ShuffleDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShuffleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shuffle_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + buffer_size: A `Tensor` of type `int64`. + seed_generator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShuffleDatasetV2", name, input_dataset, buffer_size, + seed_generator, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shuffle_dataset_v2_eager_fallback( + input_dataset, buffer_size, seed_generator, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShuffleDatasetV2", input_dataset=input_dataset, + buffer_size=buffer_size, + seed_generator=seed_generator, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShuffleDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShuffleDatasetV2 = tf_export("raw_ops.ShuffleDatasetV2")(_ops.to_raw_op(shuffle_dataset_v2)) + + +def shuffle_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + seed_generator = _ops.convert_to_tensor(seed_generator, _dtypes.resource) + _inputs_flat = [input_dataset, buffer_size, seed_generator] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"ShuffleDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShuffleDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def shuffle_dataset_v3(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, reshuffle_each_iteration:bool=True, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + buffer_size: A `Tensor` of type `int64`. + seed: A `Tensor` of type `int64`. + seed2: A `Tensor` of type `int64`. + seed_generator: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reshuffle_each_iteration: An optional `bool`. Defaults to `True`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ShuffleDatasetV3", name, input_dataset, buffer_size, seed, + seed2, seed_generator, "reshuffle_each_iteration", + reshuffle_each_iteration, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return shuffle_dataset_v3_eager_fallback( + input_dataset, buffer_size, seed, seed2, seed_generator, + reshuffle_each_iteration=reshuffle_each_iteration, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset_v3' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ShuffleDatasetV3", input_dataset=input_dataset, + buffer_size=buffer_size, seed=seed, seed2=seed2, + seed_generator=seed_generator, + output_types=output_types, + output_shapes=output_shapes, + reshuffle_each_iteration=reshuffle_each_iteration, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("reshuffle_each_iteration", + _op._get_attr_bool("reshuffle_each_iteration"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ShuffleDatasetV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ShuffleDatasetV3 = tf_export("raw_ops.ShuffleDatasetV3")(_ops.to_raw_op(shuffle_dataset_v3)) + + +def shuffle_dataset_v3_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], buffer_size: Annotated[Any, _atypes.Int64], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, reshuffle_each_iteration: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'shuffle_dataset_v3' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'shuffle_dataset_v3' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if reshuffle_each_iteration is None: + reshuffle_each_iteration = True + reshuffle_each_iteration = _execute.make_bool(reshuffle_each_iteration, "reshuffle_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + seed_generator = _ops.convert_to_tensor(seed_generator, _dtypes.resource) + _inputs_flat = [input_dataset, buffer_size, seed, seed2, seed_generator] + _attrs = ("reshuffle_each_iteration", reshuffle_each_iteration, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"ShuffleDatasetV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ShuffleDatasetV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def skip_dataset(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that skips `count` elements from the `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + count: A `Tensor` of type `int64`. + A scalar representing the number of elements from the `input_dataset` + that should be skipped. If count is -1, skips everything. 
+ output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SkipDataset", name, input_dataset, count, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return skip_dataset_eager_fallback( + input_dataset, count, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'skip_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'skip_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SkipDataset", input_dataset=input_dataset, count=count, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SkipDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SkipDataset = tf_export("raw_ops.SkipDataset")(_ops.to_raw_op(skip_dataset)) + + +def skip_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'skip_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'skip_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + count = _ops.convert_to_tensor(count, _dtypes.int64) + _inputs_flat = [input_dataset, count] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"SkipDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SkipDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseTensorSliceDataset_Tvalues = TypeVar("TV_SparseTensorSliceDataset_Tvalues", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def sparse_tensor_slice_dataset(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseTensorSliceDataset_Tvalues], dense_shape: Annotated[Any, _atypes.Int64], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that splits a SparseTensor into elements row-wise. + + Args: + indices: A `Tensor` of type `int64`. + values: A `Tensor`. + dense_shape: A `Tensor` of type `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseTensorSliceDataset", name, indices, values, dense_shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_tensor_slice_dataset_eager_fallback( + indices, values, dense_shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseTensorSliceDataset", indices=indices, values=values, + dense_shape=dense_shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tvalues", _op._get_attr_type("Tvalues")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseTensorSliceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseTensorSliceDataset = tf_export("raw_ops.SparseTensorSliceDataset")(_ops.to_raw_op(sparse_tensor_slice_dataset)) + + +def sparse_tensor_slice_dataset_eager_fallback(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseTensorSliceDataset_Tvalues], dense_shape: Annotated[Any, _atypes.Int64], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_Tvalues, (values,) = _execute.args_to_matching_eager([values], ctx, []) + indices = _ops.convert_to_tensor(indices, _dtypes.int64) + dense_shape = _ops.convert_to_tensor(dense_shape, _dtypes.int64) + _inputs_flat = [indices, values, dense_shape] + _attrs = ("Tvalues", _attr_Tvalues) + _result = _execute.execute(b"SparseTensorSliceDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseTensorSliceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def 
tf_record_dataset(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the records from one or more TFRecord files. + + Args: + filenames: A `Tensor` of type `string`. + A scalar or vector containing the name(s) of the file(s) to be + read. + compression_type: A `Tensor` of type `string`. + A scalar containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + buffer_size: A `Tensor` of type `int64`. + A scalar representing the number of bytes to buffer. A value of + 0 means no buffering will be performed. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TFRecordDataset", name, filenames, compression_type, + buffer_size, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tf_record_dataset_eager_fallback( + filenames, compression_type, buffer_size, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TFRecordDataset", filenames=filenames, + compression_type=compression_type, + buffer_size=buffer_size, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TFRecordDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TFRecordDataset = tf_export("raw_ops.TFRecordDataset")(_ops.to_raw_op(tf_record_dataset)) + + +def tf_record_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"TFRecordDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TFRecordDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def tf_record_dataset_v2(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], byte_offsets: Annotated[Any, _atypes.Int64], metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the records from one or more TFRecord files. + + Args: + filenames: A `Tensor` of type `string`. 
+ A scalar or vector containing the name(s) of the file(s) to be + read. + compression_type: A `Tensor` of type `string`. + A scalar containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + buffer_size: A `Tensor` of type `int64`. + A scalar representing the number of bytes to buffer. A value of + 0 means no buffering will be performed. + byte_offsets: A `Tensor` of type `int64`. + A scalar or vector containing the number of bytes for each file + that will be skipped prior to reading. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TFRecordDatasetV2", name, filenames, compression_type, + buffer_size, byte_offsets, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tf_record_dataset_v2_eager_fallback( + filenames, compression_type, buffer_size, byte_offsets, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TFRecordDatasetV2", filenames=filenames, + compression_type=compression_type, + buffer_size=buffer_size, + byte_offsets=byte_offsets, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TFRecordDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TFRecordDatasetV2 = tf_export("raw_ops.TFRecordDatasetV2")(_ops.to_raw_op(tf_record_dataset_v2)) + + +def tf_record_dataset_v2_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], byte_offsets: Annotated[Any, _atypes.Int64], metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + byte_offsets = _ops.convert_to_tensor(byte_offsets, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size, byte_offsets] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"TFRecordDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TFRecordDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def take_dataset(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that contains `count` elements from the `input_dataset`. 
+ + Args: + input_dataset: A `Tensor` of type `variant`. + count: A `Tensor` of type `int64`. + A scalar representing the number of elements from the `input_dataset` + that should be taken. A value of `-1` indicates that all of `input_dataset` + is taken. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TakeDataset", name, input_dataset, count, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return take_dataset_eager_fallback( + input_dataset, count, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'take_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'take_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TakeDataset", input_dataset=input_dataset, count=count, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TakeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TakeDataset = tf_export("raw_ops.TakeDataset")(_ops.to_raw_op(take_dataset)) + + +def take_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], count: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'take_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'take_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + count = _ops.convert_to_tensor(count, _dtypes.int64) + _inputs_flat = [input_dataset, count] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"TakeDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TakeDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def tensor_dataset(components, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits `components` as a tuple of tensors once. + + Args: + components: A list of `Tensor` objects. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorDataset", name, components, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_dataset_eager_fallback( + components, output_shapes=output_shapes, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'tensor_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorDataset", components=components, output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Toutput_types", _op.get_attr("Toutput_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorDataset = tf_export("raw_ops.TensorDataset")(_ops.to_raw_op(tensor_dataset)) + + +def tensor_dataset_eager_fallback(components, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'tensor_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Toutput_types, components = _execute.convert_to_mixed_eager_tensors(components, ctx) + _inputs_flat = list(components) + _attrs = ("Toutput_types", _attr_Toutput_types, "output_shapes", + output_shapes, "metadata", metadata) + _result = _execute.execute(b"TensorDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def tensor_slice_dataset(components, output_shapes, is_files:bool=False, metadata:str="", replicate_on_split:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits each dim-0 slice of `components` once. + + Args: + components: A list of `Tensor` objects. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + is_files: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + replicate_on_split: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorSliceDataset", name, components, "output_shapes", + output_shapes, "is_files", is_files, "metadata", metadata, + "replicate_on_split", replicate_on_split) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_slice_dataset_eager_fallback( + components, output_shapes=output_shapes, is_files=is_files, + metadata=metadata, replicate_on_split=replicate_on_split, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'tensor_slice_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if is_files is None: + is_files = False + is_files = _execute.make_bool(is_files, "is_files") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + if replicate_on_split is None: + replicate_on_split = False + replicate_on_split = _execute.make_bool(replicate_on_split, "replicate_on_split") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorSliceDataset", components=components, + output_shapes=output_shapes, is_files=is_files, + metadata=metadata, + replicate_on_split=replicate_on_split, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Toutput_types", _op.get_attr("Toutput_types"), "output_shapes", + _op.get_attr("output_shapes"), "is_files", + _op._get_attr_bool("is_files"), "metadata", + _op.get_attr("metadata"), "replicate_on_split", + _op._get_attr_bool("replicate_on_split")) + _inputs_flat = _op.inputs + _execute.record_gradient( + 
"TensorSliceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorSliceDataset = tf_export("raw_ops.TensorSliceDataset")(_ops.to_raw_op(tensor_slice_dataset)) + + +def tensor_slice_dataset_eager_fallback(components, output_shapes, is_files: bool, metadata: str, replicate_on_split: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'tensor_slice_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if is_files is None: + is_files = False + is_files = _execute.make_bool(is_files, "is_files") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + if replicate_on_split is None: + replicate_on_split = False + replicate_on_split = _execute.make_bool(replicate_on_split, "replicate_on_split") + _attr_Toutput_types, components = _execute.convert_to_mixed_eager_tensors(components, ctx) + _inputs_flat = list(components) + _attrs = ("Toutput_types", _attr_Toutput_types, "output_shapes", + output_shapes, "is_files", is_files, "metadata", metadata, + "replicate_on_split", replicate_on_split) + _result = _execute.execute(b"TensorSliceDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorSliceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def text_line_dataset(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the lines of one or more text files. + + Args: + filenames: A `Tensor` of type `string`. + A scalar or a vector containing the name(s) of the file(s) to be + read. 
+ compression_type: A `Tensor` of type `string`. + A scalar containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + buffer_size: A `Tensor` of type `int64`. + A scalar containing the number of bytes to buffer. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TextLineDataset", name, filenames, compression_type, + buffer_size, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return text_line_dataset_eager_fallback( + filenames, compression_type, buffer_size, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TextLineDataset", filenames=filenames, + compression_type=compression_type, + buffer_size=buffer_size, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TextLineDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TextLineDataset = tf_export("raw_ops.TextLineDataset")(_ops.to_raw_op(text_line_dataset)) + + +def text_line_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"TextLineDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TextLineDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def unwrap_dataset_variant(input_handle: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_handle: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnwrapDatasetVariant", name, input_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unwrap_dataset_variant_eager_fallback( + input_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnwrapDatasetVariant", input_handle=input_handle, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnwrapDatasetVariant", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnwrapDatasetVariant = tf_export("raw_ops.UnwrapDatasetVariant")(_ops.to_raw_op(unwrap_dataset_variant)) + + +def unwrap_dataset_variant_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Variant]: + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle] + _attrs = None + _result = _execute.execute(b"UnwrapDatasetVariant", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnwrapDatasetVariant", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def window_dataset(input_dataset: Annotated[Any, _atypes.Variant], size: Annotated[Any, _atypes.Int64], shift: Annotated[Any, _atypes.Int64], stride: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r""" Combines (nests of) input elements into a dataset of (nests of) windows. 
+ + A "window" is a finite dataset of flat elements of size `size` (or possibly + fewer if there are not enough input elements to fill the window and + `drop_remainder` evaluates to false). + + The `shift` argument determines the number of input elements by which + the window moves on each iteration. The first element in the `k`th window + will be element + + ``` + 1 + (k-1) * shift + ``` + + of the input dataset. In particular, the first element of the first window + will always be the first element of the input dataset. + + If the `stride` parameter is greater than 1, then each window will skip + `(stride - 1)` input elements between each element that appears in the + window. Output windows will still contain `size` elements regardless of + the value of `stride`. + + The `stride` argument determines the stride of the input elements, and the + `shift` argument determines the shift of the window. + + For example, letting `{...}` to represent a Dataset: + + - `tf.data.Dataset.range(7).window(2)` produces + `{{0, 1}, {2, 3}, {4, 5}, {6}}` + - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces + `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}` + - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces + `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}` + + Note that when the `window` transformation is applied to a dataset of + nested elements, it produces a dataset of nested windows. + + For example: + + - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)` + produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}` + - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)` + produces `{{"a": {0, 1}}, {"a": {2, 3}}}` + + Args: + input_dataset: A `Tensor` of type `variant`. + size: A `Tensor` of type `int64`. + An integer scalar, representing the number of elements + of the input dataset to combine into a window. Must be positive. + shift: A `Tensor` of type `int64`. 
+ An integer scalar, representing the number of input elements + by which the window moves in each iteration. Defaults to `size`. + Must be positive. + stride: A `Tensor` of type `int64`. + An integer scalar, representing the stride of the input elements + in the sliding window. Must be positive. The default value of 1 means + "retain every input element". + drop_remainder: A `Tensor` of type `bool`. + A Boolean scalar, representing whether the last window should be + dropped if its size is smaller than `window_size`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "WindowDataset", name, input_dataset, size, shift, stride, + drop_remainder, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return window_dataset_eager_fallback( + input_dataset, size, shift, stride, drop_remainder, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'window_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'window_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "WindowDataset", input_dataset=input_dataset, size=size, shift=shift, + stride=stride, drop_remainder=drop_remainder, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "WindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +WindowDataset = tf_export("raw_ops.WindowDataset")(_ops.to_raw_op(window_dataset)) + + +def window_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], size: Annotated[Any, _atypes.Int64], shift: Annotated[Any, _atypes.Int64], stride: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + size = _ops.convert_to_tensor(size, _dtypes.int64) + shift = _ops.convert_to_tensor(shift, _dtypes.int64) + stride = _ops.convert_to_tensor(stride, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset, size, shift, stride, drop_remainder] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"WindowDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "WindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def window_op(inputs, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + inputs: A list of `Tensor` objects. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "WindowOp", name, inputs, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return window_op_eager_fallback( + inputs, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'window_op' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'window_op' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "WindowOp", inputs=inputs, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "Tinputs", + _op.get_attr("Tinputs")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "WindowOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +WindowOp = tf_export("raw_ops.WindowOp")(_ops.to_raw_op(window_op)) + + +def window_op_eager_fallback(inputs, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'window_op' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'window_op' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Tinputs, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = list(inputs) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "Tinputs", _attr_Tinputs) + _result = _execute.execute(b"WindowOp", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "WindowOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def wrap_dataset_variant(input_handle: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_handle: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "WrapDatasetVariant", name, input_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return wrap_dataset_variant_eager_fallback( + input_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "WrapDatasetVariant", input_handle=input_handle, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "WrapDatasetVariant", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +WrapDatasetVariant = tf_export("raw_ops.WrapDatasetVariant")(_ops.to_raw_op(wrap_dataset_variant)) + + +def wrap_dataset_variant_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Variant]: + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle] + _attrs = None + _result = _execute.execute(b"WrapDatasetVariant", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "WrapDatasetVariant", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def zip_dataset(input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that zips together `input_datasets`. + + The elements of the resulting dataset are created by zipping corresponding + elements from each of the input datasets. + + The size of the resulting dataset will match the size of the smallest input + dataset, and no error will be raised if input datasets have different sizes. + + Args: + input_datasets: A list of at least 1 `Tensor` objects with type `variant`. + List of `N` variant Tensors representing datasets to be zipped together. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ZipDataset", name, input_datasets, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return zip_dataset_eager_fallback( + input_datasets, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'zip_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'zip_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'zip_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ZipDataset", input_datasets=input_datasets, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N"), + "metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ZipDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ZipDataset = tf_export("raw_ops.ZipDataset")(_ops.to_raw_op(zip_dataset)) + + +def zip_dataset_eager_fallback(input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'zip_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'zip_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'zip_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_datasets = _ops.convert_n_to_tensor(input_datasets, _dtypes.variant) + _inputs_flat = list(input_datasets) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, "N", + _attr_N, "metadata", metadata) + _result = _execute.execute(b"ZipDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ZipDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_encode_proto_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_encode_proto_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..1c8ddd410b03865f255c53c9fc269c448bdff03a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_encode_proto_ops.py @@ -0,0 +1,190 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.encode_proto') +def encode_proto(sizes: Annotated[Any, _atypes.Int32], values, field_names, message_type: str, descriptor_source:str="local://", name=None) -> Annotated[Any, _atypes.String]: + r"""The op serializes protobuf messages provided in the input tensors. + + The types of the tensors in `values` must match the schema for the fields + specified in `field_names`. All the tensors in `values` must have a common + shape prefix, *batch_shape*. + + The `sizes` tensor specifies repeat counts for each field. The repeat count + (last dimension) of a each tensor in `values` must be greater than or equal + to corresponding repeat count in `sizes`. + + A `message_type` name must be provided to give context for the field names. + The actual message descriptor can be looked up either in the linked-in + descriptor pool or a filename provided by the caller using the + `descriptor_source` attribute. + + For the most part, the mapping between Proto field types and TensorFlow dtypes + is straightforward. 
However, there are a few special cases: + + - A proto field that contains a submessage or group can only be converted + to `DT_STRING` (the serialized submessage). This is to reduce the complexity + of the API. The resulting string can be used as input to another instance of + the decode_proto op. + + - TensorFlow lacks support for unsigned integers. The ops represent uint64 + types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + way). Unsigned int32 values can be represented exactly by specifying type + `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + the `output_types` attribute. + + The `descriptor_source` attribute selects the source of protocol + descriptors to consult when looking up `message_type`. This may be: + + - An empty string or "local://", in which case protocol descriptors are + created for C++ (not Python) proto definitions linked to the binary. + + - A file, in which case protocol descriptors are created from the file, + which is expected to contain a `FileDescriptorSet` serialized as a string. + NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + and `--include_imports` options to the protocol compiler `protoc`. + + - A "bytes://", in which protocol descriptors are created from ``, + which is expected to be a `FileDescriptorSet` serialized as a string. + + Args: + sizes: A `Tensor` of type `int32`. + Tensor of int32 with shape `[batch_shape, len(field_names)]`. + values: A list of `Tensor` objects. + List of tensors containing values for the corresponding field. + field_names: A list of `strings`. + List of strings containing proto field names. + message_type: A `string`. Name of the proto message type to decode. + descriptor_source: An optional `string`. Defaults to `"local://"`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EncodeProto", name, sizes, values, "field_names", field_names, + "message_type", message_type, "descriptor_source", descriptor_source) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_encode_proto( + (sizes, values, field_names, message_type, descriptor_source, + name,), None) + if _result is not NotImplemented: + return _result + return encode_proto_eager_fallback( + sizes, values, field_names=field_names, message_type=message_type, + descriptor_source=descriptor_source, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + encode_proto, (), dict(sizes=sizes, values=values, + field_names=field_names, + message_type=message_type, + descriptor_source=descriptor_source, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_encode_proto( + (sizes, values, field_names, message_type, descriptor_source, name,), + None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if not isinstance(field_names, (list, tuple)): + raise TypeError( + "Expected list for 'field_names' argument to " + "'encode_proto' Op, not %r." 
% field_names) + field_names = [_execute.make_str(_s, "field_names") for _s in field_names] + message_type = _execute.make_str(message_type, "message_type") + if descriptor_source is None: + descriptor_source = "local://" + descriptor_source = _execute.make_str(descriptor_source, "descriptor_source") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EncodeProto", sizes=sizes, values=values, field_names=field_names, + message_type=message_type, + descriptor_source=descriptor_source, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + encode_proto, (), dict(sizes=sizes, values=values, + field_names=field_names, + message_type=message_type, + descriptor_source=descriptor_source, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("field_names", _op.get_attr("field_names"), "message_type", + _op.get_attr("message_type"), "descriptor_source", + _op.get_attr("descriptor_source"), "Tinput_types", + _op.get_attr("Tinput_types")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "EncodeProto", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +EncodeProto = tf_export("raw_ops.EncodeProto")(_ops.to_raw_op(encode_proto)) +_dispatcher_for_encode_proto = encode_proto._tf_type_based_dispatcher.Dispatch + + +def encode_proto_eager_fallback(sizes: Annotated[Any, _atypes.Int32], values, field_names, message_type: str, descriptor_source: str, name, ctx) -> Annotated[Any, _atypes.String]: + if not isinstance(field_names, (list, tuple)): + raise TypeError( + "Expected list for 'field_names' argument to " + "'encode_proto' Op, not %r." 
% field_names) + field_names = [_execute.make_str(_s, "field_names") for _s in field_names] + message_type = _execute.make_str(message_type, "message_type") + if descriptor_source is None: + descriptor_source = "local://" + descriptor_source = _execute.make_str(descriptor_source, "descriptor_source") + _attr_Tinput_types, values = _execute.convert_to_mixed_eager_tensors(values, ctx) + sizes = _ops.convert_to_tensor(sizes, _dtypes.int32) + _inputs_flat = [sizes] + list(values) + _attrs = ("field_names", field_names, "message_type", message_type, + "descriptor_source", descriptor_source, "Tinput_types", _attr_Tinput_types) + _result = _execute.execute(b"EncodeProto", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "EncodeProto", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_filesystem_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_filesystem_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c8db7ca04c4878178a1586110c35cc020af96dd6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_filesystem_ops.py @@ -0,0 +1,72 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def file_system_set_configuration(scheme: Annotated[Any, _atypes.String], key: Annotated[Any, _atypes.String], value: Annotated[Any, _atypes.String], name=None): + r"""Set configuration of the file system. + + Args: + scheme: A `Tensor` of type `string`. File system scheme. + key: A `Tensor` of type `string`. The name of the configuration option. + value: A `Tensor` of type `string`. The value of the configuration option. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FileSystemSetConfiguration", name, scheme, key, value) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return file_system_set_configuration_eager_fallback( + scheme, key, value, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FileSystemSetConfiguration", scheme=scheme, key=key, value=value, + name=name) + return _op +FileSystemSetConfiguration = tf_export("raw_ops.FileSystemSetConfiguration")(_ops.to_raw_op(file_system_set_configuration)) + + +def file_system_set_configuration_eager_fallback(scheme: Annotated[Any, _atypes.String], key: Annotated[Any, _atypes.String], value: Annotated[Any, _atypes.String], name, ctx): + scheme = _ops.convert_to_tensor(scheme, _dtypes.string) + key = _ops.convert_to_tensor(key, _dtypes.string) + value = _ops.convert_to_tensor(value, _dtypes.string) + _inputs_flat = [scheme, key, value] + _attrs = None + _result = _execute.execute(b"FileSystemSetConfiguration", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_list_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_list_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..032f2bbe22dbc6b69b996ae57011d3886a02a5aa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_list_ops.py @@ -0,0 +1,1464 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_EmptyTensorList_element_dtype = TypeVar("TV_EmptyTensorList_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_EmptyTensorList_shape_type = TypeVar("TV_EmptyTensorList_shape_type", _atypes.Int32, _atypes.Int64) + +def empty_tensor_list(element_shape: Annotated[Any, TV_EmptyTensorList_shape_type], max_num_elements: Annotated[Any, _atypes.Int32], element_dtype: TV_EmptyTensorList_element_dtype, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates and returns an empty tensor list. + + All list elements must be tensors of dtype element_dtype and shape compatible + with element_shape. + + handle: an empty tensor list. + element_dtype: the type of elements in the list. 
+ element_shape: a shape compatible with that of elements in the list. + + Args: + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + max_num_elements: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EmptyTensorList", name, element_shape, max_num_elements, + "element_dtype", element_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return empty_tensor_list_eager_fallback( + element_shape, max_num_elements, element_dtype=element_dtype, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EmptyTensorList", element_shape=element_shape, + max_num_elements=max_num_elements, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "EmptyTensorList", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +EmptyTensorList = tf_export("raw_ops.EmptyTensorList")(_ops.to_raw_op(empty_tensor_list)) + + +def empty_tensor_list_eager_fallback(element_shape: Annotated[Any, TV_EmptyTensorList_shape_type], max_num_elements: Annotated[Any, _atypes.Int32], element_dtype: TV_EmptyTensorList_element_dtype, name, ctx) -> Annotated[Any, _atypes.Variant]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + 
_attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + max_num_elements = _ops.convert_to_tensor(max_num_elements, _dtypes.int32) + _inputs_flat = [element_shape, max_num_elements] + _attrs = ("element_dtype", element_dtype, "shape_type", _attr_shape_type) + _result = _execute.execute(b"EmptyTensorList", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "EmptyTensorList", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_TensorListConcatOutput = collections.namedtuple( + "TensorListConcat", + ["tensor", "lengths"]) + + +TV_TensorListConcat_element_dtype = TypeVar("TV_TensorListConcat_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_concat(input_handle: Annotated[Any, _atypes.Variant], element_dtype: TV_TensorListConcat_element_dtype, element_shape=None, name=None): + r"""Concats all tensors in the list along the 0th dimension. + + Requires that all tensors have the same shape except the first dimension. + + input_handle: The input list. + tensor: The concated result. + lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + + Args: + input_handle: A `Tensor` of type `variant`. + element_dtype: A `tf.DType`. + element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (tensor, lengths). 
+ + tensor: A `Tensor` of type `element_dtype`. + lengths: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListConcat", name, input_handle, "element_dtype", + element_dtype, "element_shape", element_shape) + _result = _TensorListConcatOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_concat_eager_fallback( + input_handle, element_dtype=element_dtype, + element_shape=element_shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + if element_shape is None: + element_shape = None + element_shape = _execute.make_shape(element_shape, "element_shape") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListConcat", input_handle=input_handle, + element_dtype=element_dtype, + element_shape=element_shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "element_shape", _op.get_attr("element_shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListConcat", _inputs_flat, _attrs, _result) + _result = _TensorListConcatOutput._make(_result) + return _result + +TensorListConcat = tf_export("raw_ops.TensorListConcat")(_ops.to_raw_op(tensor_list_concat)) + + +def tensor_list_concat_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], element_dtype: TV_TensorListConcat_element_dtype, element_shape, name, ctx): + element_dtype = _execute.make_type(element_dtype, "element_dtype") + if element_shape is None: + element_shape = None + element_shape = _execute.make_shape(element_shape, "element_shape") 
+ input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle] + _attrs = ("element_dtype", element_dtype, "element_shape", element_shape) + _result = _execute.execute(b"TensorListConcat", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListConcat", _inputs_flat, _attrs, _result) + _result = _TensorListConcatOutput._make(_result) + return _result + + +TV_TensorListConcatLists_element_dtype = TypeVar("TV_TensorListConcatLists_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_concat_lists(input_a: Annotated[Any, _atypes.Variant], input_b: Annotated[Any, _atypes.Variant], element_dtype: TV_TensorListConcatLists_element_dtype, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_a: A `Tensor` of type `variant`. + input_b: A `Tensor` of type `variant`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListConcatLists", name, input_a, input_b, + "element_dtype", element_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_concat_lists_eager_fallback( + input_a, input_b, element_dtype=element_dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListConcatLists", input_a=input_a, input_b=input_b, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListConcatLists", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListConcatLists = tf_export("raw_ops.TensorListConcatLists")(_ops.to_raw_op(tensor_list_concat_lists)) + + +def tensor_list_concat_lists_eager_fallback(input_a: Annotated[Any, _atypes.Variant], input_b: Annotated[Any, _atypes.Variant], element_dtype: TV_TensorListConcatLists_element_dtype, name, ctx) -> Annotated[Any, _atypes.Variant]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + input_a = _ops.convert_to_tensor(input_a, _dtypes.variant) + input_b = _ops.convert_to_tensor(input_b, _dtypes.variant) + _inputs_flat = [input_a, input_b] + _attrs = ("element_dtype", element_dtype) + _result = _execute.execute(b"TensorListConcatLists", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListConcatLists", _inputs_flat, _attrs, 
_result) + _result, = _result + return _result + +_TensorListConcatV2Output = collections.namedtuple( + "TensorListConcatV2", + ["tensor", "lengths"]) + + +TV_TensorListConcatV2_element_dtype = TypeVar("TV_TensorListConcatV2_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListConcatV2_shape_type = TypeVar("TV_TensorListConcatV2_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_concat_v2(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, TV_TensorListConcatV2_shape_type], leading_dims: Annotated[Any, _atypes.Int64], element_dtype: TV_TensorListConcatV2_element_dtype, name=None): + r"""Concats all tensors in the list along the 0th dimension. + + Requires that all tensors have the same shape except the first dimension. + + input_handle: The input list. + element_shape: The shape of the uninitialized elements in the list. If the first + dimension is not -1, it is assumed that all list elements have the same + leading dim. + leading_dims: The list of leading dims of uninitialized list elements. Used if + the leading dim of input_handle.element_shape or the element_shape input arg + is not already set. + tensor: The concated result. + lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + + Args: + input_handle: A `Tensor` of type `variant`. + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + leading_dims: A `Tensor` of type `int64`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). 
+ + Returns: + A tuple of `Tensor` objects (tensor, lengths). + + tensor: A `Tensor` of type `element_dtype`. + lengths: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListConcatV2", name, input_handle, element_shape, + leading_dims, "element_dtype", element_dtype) + _result = _TensorListConcatV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_concat_v2_eager_fallback( + input_handle, element_shape, leading_dims, + element_dtype=element_dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListConcatV2", input_handle=input_handle, + element_shape=element_shape, + leading_dims=leading_dims, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListConcatV2", _inputs_flat, _attrs, _result) + _result = _TensorListConcatV2Output._make(_result) + return _result + +TensorListConcatV2 = tf_export("raw_ops.TensorListConcatV2")(_ops.to_raw_op(tensor_list_concat_v2)) + + +def tensor_list_concat_v2_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, TV_TensorListConcatV2_shape_type], leading_dims: Annotated[Any, _atypes.Int64], element_dtype: TV_TensorListConcatV2_element_dtype, name, ctx): + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _attr_shape_type, 
(element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + leading_dims = _ops.convert_to_tensor(leading_dims, _dtypes.int64) + _inputs_flat = [input_handle, element_shape, leading_dims] + _attrs = ("element_dtype", element_dtype, "shape_type", _attr_shape_type) + _result = _execute.execute(b"TensorListConcatV2", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListConcatV2", _inputs_flat, _attrs, _result) + _result = _TensorListConcatV2Output._make(_result) + return _result + + +TV_TensorListElementShape_shape_type = TypeVar("TV_TensorListElementShape_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_element_shape(input_handle: Annotated[Any, _atypes.Variant], shape_type: TV_TensorListElementShape_shape_type, name=None) -> Annotated[Any, TV_TensorListElementShape_shape_type]: + r"""The shape of the elements of the given list, as a tensor. + + input_handle: the list + element_shape: the shape of elements of the list + + Args: + input_handle: A `Tensor` of type `variant`. + shape_type: A `tf.DType` from: `tf.int32, tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `shape_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListElementShape", name, input_handle, "shape_type", + shape_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_element_shape_eager_fallback( + input_handle, shape_type=shape_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ shape_type = _execute.make_type(shape_type, "shape_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListElementShape", input_handle=input_handle, + shape_type=shape_type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListElementShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListElementShape = tf_export("raw_ops.TensorListElementShape")(_ops.to_raw_op(tensor_list_element_shape)) + + +def tensor_list_element_shape_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], shape_type: TV_TensorListElementShape_shape_type, name, ctx) -> Annotated[Any, TV_TensorListElementShape_shape_type]: + shape_type = _execute.make_type(shape_type, "shape_type") + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle] + _attrs = ("shape_type", shape_type) + _result = _execute.execute(b"TensorListElementShape", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListElementShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListFromTensor_element_dtype = TypeVar("TV_TensorListFromTensor_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListFromTensor_shape_type = TypeVar("TV_TensorListFromTensor_shape_type", _atypes.Int32, _atypes.Int64) + +def 
tensor_list_from_tensor(tensor: Annotated[Any, TV_TensorListFromTensor_element_dtype], element_shape: Annotated[Any, TV_TensorListFromTensor_shape_type], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a TensorList which, when stacked, has the value of `tensor`. + + Each tensor in the result list corresponds to one row of the input tensor. + + tensor: The input tensor. + output_handle: The list. + + Args: + tensor: A `Tensor`. + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListFromTensor", name, tensor, element_shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_from_tensor_eager_fallback( + tensor, element_shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListFromTensor", tensor=tensor, element_shape=element_shape, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListFromTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListFromTensor = tf_export("raw_ops.TensorListFromTensor")(_ops.to_raw_op(tensor_list_from_tensor)) + + +def tensor_list_from_tensor_eager_fallback(tensor: Annotated[Any, TV_TensorListFromTensor_element_dtype], element_shape: Annotated[Any, TV_TensorListFromTensor_shape_type], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [tensor, element_shape] + _attrs = ("element_dtype", _attr_element_dtype, "shape_type", + _attr_shape_type) + _result = _execute.execute(b"TensorListFromTensor", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListFromTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListGather_element_dtype = TypeVar("TV_TensorListGather_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def 
tensor_list_gather(input_handle: Annotated[Any, _atypes.Variant], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListGather_element_dtype, name=None) -> Annotated[Any, TV_TensorListGather_element_dtype]: + r"""Creates a Tensor by indexing into the TensorList. + + Each row in the produced Tensor corresponds to the element in the TensorList + specified by the given index (see `tf.gather`). + + input_handle: The input tensor list. + indices: The indices used to index into the list. + values: The tensor. + + Args: + input_handle: A `Tensor` of type `variant`. + indices: A `Tensor` of type `int32`. + element_shape: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `element_dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListGather", name, input_handle, indices, element_shape, + "element_dtype", element_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_gather_eager_fallback( + input_handle, indices, element_shape, element_dtype=element_dtype, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListGather", input_handle=input_handle, indices=indices, + element_shape=element_shape, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListGather = tf_export("raw_ops.TensorListGather")(_ops.to_raw_op(tensor_list_gather)) + + +def tensor_list_gather_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListGather_element_dtype, name, ctx) -> Annotated[Any, TV_TensorListGather_element_dtype]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + indices = _ops.convert_to_tensor(indices, _dtypes.int32) + element_shape = _ops.convert_to_tensor(element_shape, _dtypes.int32) + _inputs_flat = [input_handle, indices, element_shape] + _attrs = ("element_dtype", element_dtype) + _result = _execute.execute(b"TensorListGather", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListGetItem_element_dtype = TypeVar("TV_TensorListGetItem_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, 
_atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_get_item(input_handle: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListGetItem_element_dtype, name=None) -> Annotated[Any, TV_TensorListGetItem_element_dtype]: + r"""Returns the item in the list with the given index. + + input_handle: the list + index: the position in the list from which an element will be retrieved + item: the element at that position + + Args: + input_handle: A `Tensor` of type `variant`. + index: A `Tensor` of type `int32`. + element_shape: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `element_dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListGetItem", name, input_handle, index, element_shape, + "element_dtype", element_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_get_item_eager_fallback( + input_handle, index, element_shape, element_dtype=element_dtype, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListGetItem", input_handle=input_handle, index=index, + element_shape=element_shape, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListGetItem", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListGetItem = tf_export("raw_ops.TensorListGetItem")(_ops.to_raw_op(tensor_list_get_item)) + + +def tensor_list_get_item_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListGetItem_element_dtype, name, ctx) -> Annotated[Any, TV_TensorListGetItem_element_dtype]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + index = _ops.convert_to_tensor(index, _dtypes.int32) + element_shape = _ops.convert_to_tensor(element_shape, _dtypes.int32) + _inputs_flat = [input_handle, index, element_shape] + _attrs = ("element_dtype", element_dtype) + _result = _execute.execute(b"TensorListGetItem", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListGetItem", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def tensor_list_length(input_handle: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Int32]: + r"""Returns the number of tensors in the input tensor list. + + input_handle: the input list + length: the number of tensors in the list + + Args: + input_handle: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListLength", name, input_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_length_eager_fallback( + input_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListLength", input_handle=input_handle, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListLength", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListLength = tf_export("raw_ops.TensorListLength")(_ops.to_raw_op(tensor_list_length)) + + +def tensor_list_length_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Int32]: + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle] + _attrs = None + _result = _execute.execute(b"TensorListLength", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListLength", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_TensorListPopBackOutput = collections.namedtuple( + "TensorListPopBack", + ["output_handle", "tensor"]) + + +TV_TensorListPopBack_element_dtype = TypeVar("TV_TensorListPopBack_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, 
_atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_pop_back(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListPopBack_element_dtype, name=None): + r"""Returns the last element of the input list as well as a list with all but that element. + + Fails if the list is empty. + + input_handle: the input list + tensor: the withdrawn last element of the list + element_dtype: the type of elements in the list + element_shape: the shape of the output tensor + + Args: + input_handle: A `Tensor` of type `variant`. + element_shape: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output_handle, tensor). + + output_handle: A `Tensor` of type `variant`. + tensor: A `Tensor` of type `element_dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListPopBack", name, input_handle, element_shape, + "element_dtype", element_dtype) + _result = _TensorListPopBackOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_pop_back_eager_fallback( + input_handle, element_shape, element_dtype=element_dtype, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListPopBack", input_handle=input_handle, + element_shape=element_shape, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListPopBack", _inputs_flat, _attrs, _result) + _result = _TensorListPopBackOutput._make(_result) + return _result + +TensorListPopBack = tf_export("raw_ops.TensorListPopBack")(_ops.to_raw_op(tensor_list_pop_back)) + + +def tensor_list_pop_back_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListPopBack_element_dtype, name, ctx): + element_dtype = _execute.make_type(element_dtype, "element_dtype") + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + element_shape = _ops.convert_to_tensor(element_shape, _dtypes.int32) + _inputs_flat = [input_handle, element_shape] + _attrs = ("element_dtype", element_dtype) + _result = _execute.execute(b"TensorListPopBack", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListPopBack", _inputs_flat, _attrs, _result) + _result = _TensorListPopBackOutput._make(_result) + return _result + + +TV_TensorListPushBack_element_dtype = TypeVar("TV_TensorListPushBack_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, 
_atypes.Variant) + +def tensor_list_push_back(input_handle: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListPushBack_element_dtype], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. + + tensor: The tensor to put on the list. + input_handle: The old list. + output_handle: A list with the elements of the old list followed by tensor. + element_dtype: the type of elements in the list. + element_shape: a shape compatible with that of elements in the list. + + Args: + input_handle: A `Tensor` of type `variant`. + tensor: A `Tensor`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListPushBack", name, input_handle, tensor) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_push_back_eager_fallback( + input_handle, tensor, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListPushBack", input_handle=input_handle, tensor=tensor, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListPushBack", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListPushBack = tf_export("raw_ops.TensorListPushBack")(_ops.to_raw_op(tensor_list_push_back)) + + +def tensor_list_push_back_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListPushBack_element_dtype], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + _inputs_flat = [input_handle, tensor] + _attrs = ("element_dtype", _attr_element_dtype) + _result = _execute.execute(b"TensorListPushBack", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListPushBack", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListPushBackBatch_element_dtype = TypeVar("TV_TensorListPushBackBatch_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_push_back_batch(input_handles: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListPushBackBatch_element_dtype], name=None) -> Annotated[Any, _atypes.Variant]: + 
r"""TODO: add doc. + + Args: + input_handles: A `Tensor` of type `variant`. + tensor: A `Tensor`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListPushBackBatch", name, input_handles, tensor) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_push_back_batch_eager_fallback( + input_handles, tensor, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListPushBackBatch", input_handles=input_handles, tensor=tensor, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListPushBackBatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListPushBackBatch = tf_export("raw_ops.TensorListPushBackBatch")(_ops.to_raw_op(tensor_list_push_back_batch)) + + +def tensor_list_push_back_batch_eager_fallback(input_handles: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListPushBackBatch_element_dtype], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + input_handles = _ops.convert_to_tensor(input_handles, _dtypes.variant) + _inputs_flat = [input_handles, tensor] + _attrs = ("element_dtype", _attr_element_dtype) + _result = _execute.execute(b"TensorListPushBackBatch", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + 
"TensorListPushBackBatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListReserve_element_dtype = TypeVar("TV_TensorListReserve_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListReserve_shape_type = TypeVar("TV_TensorListReserve_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_reserve(element_shape: Annotated[Any, TV_TensorListReserve_shape_type], num_elements: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListReserve_element_dtype, name=None) -> Annotated[Any, _atypes.Variant]: + r"""List of the given size with empty elements. + + element_shape: the shape of the future elements of the list + num_elements: the number of elements to reserve + handle: the output list + element_dtype: the desired type of elements in the list. + + Args: + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + num_elements: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListReserve", name, element_shape, num_elements, + "element_dtype", element_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_reserve_eager_fallback( + element_shape, num_elements, element_dtype=element_dtype, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListReserve", element_shape=element_shape, + num_elements=num_elements, + element_dtype=element_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListReserve", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListReserve = tf_export("raw_ops.TensorListReserve")(_ops.to_raw_op(tensor_list_reserve)) + + +def tensor_list_reserve_eager_fallback(element_shape: Annotated[Any, TV_TensorListReserve_shape_type], num_elements: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListReserve_element_dtype, name, ctx) -> Annotated[Any, _atypes.Variant]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + _attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + num_elements = _ops.convert_to_tensor(num_elements, _dtypes.int32) + _inputs_flat = [element_shape, num_elements] + _attrs = ("element_dtype", element_dtype, "shape_type", _attr_shape_type) + _result = 
_execute.execute(b"TensorListReserve", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListReserve", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def tensor_list_resize(input_handle: Annotated[Any, _atypes.Variant], size: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Resizes the list. + + + input_handle: the input list + size: size of the output list + + Args: + input_handle: A `Tensor` of type `variant`. + size: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListResize", name, input_handle, size) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_resize_eager_fallback( + input_handle, size, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListResize", input_handle=input_handle, size=size, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListResize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListResize = tf_export("raw_ops.TensorListResize")(_ops.to_raw_op(tensor_list_resize)) + + +def tensor_list_resize_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], size: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.Variant]: + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + size = _ops.convert_to_tensor(size, _dtypes.int32) + _inputs_flat = [input_handle, size] + _attrs = None + _result = _execute.execute(b"TensorListResize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListResize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListScatter_element_dtype = TypeVar("TV_TensorListScatter_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListScatter_shape_type = TypeVar("TV_TensorListScatter_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_scatter(tensor: Annotated[Any, TV_TensorListScatter_element_dtype], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, TV_TensorListScatter_shape_type], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a TensorList by indexing 
into a Tensor. + + Each member of the TensorList corresponds to one row of the input tensor, + specified by the given index (see `tf.gather`). + + tensor: The input tensor. + indices: The indices used to index into the list. + element_shape: The shape of the elements in the list (can be less specified than + the shape of the tensor). + output_handle: The TensorList. + + Args: + tensor: A `Tensor`. + indices: A `Tensor` of type `int32`. + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListScatter", name, tensor, indices, element_shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_scatter_eager_fallback( + tensor, indices, element_shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListScatter", tensor=tensor, indices=indices, + element_shape=element_shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListScatter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListScatter = tf_export("raw_ops.TensorListScatter")(_ops.to_raw_op(tensor_list_scatter)) + + +def tensor_list_scatter_eager_fallback(tensor: Annotated[Any, TV_TensorListScatter_element_dtype], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, TV_TensorListScatter_shape_type], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + indices = _ops.convert_to_tensor(indices, _dtypes.int32) + _inputs_flat = [tensor, indices, element_shape] + _attrs = ("element_dtype", _attr_element_dtype, "shape_type", + _attr_shape_type) + _result = _execute.execute(b"TensorListScatter", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListScatter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListScatterIntoExistingList_element_dtype = TypeVar("TV_TensorListScatterIntoExistingList_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, 
_atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_scatter_into_existing_list(input_handle: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListScatterIntoExistingList_element_dtype], indices: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Scatters tensor at indices in an input list. + + Each member of the TensorList corresponds to one row of the input tensor, + specified by the given index (see `tf.gather`). + + input_handle: The list to scatter into. + tensor: The input tensor. + indices: The indices used to index into the list. + output_handle: The TensorList. + + Args: + input_handle: A `Tensor` of type `variant`. + tensor: A `Tensor`. + indices: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListScatterIntoExistingList", name, input_handle, tensor, + indices) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_scatter_into_existing_list_eager_fallback( + input_handle, tensor, indices, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListScatterIntoExistingList", input_handle=input_handle, + tensor=tensor, indices=indices, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListScatterIntoExistingList", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListScatterIntoExistingList = tf_export("raw_ops.TensorListScatterIntoExistingList")(_ops.to_raw_op(tensor_list_scatter_into_existing_list)) + + +def tensor_list_scatter_into_existing_list_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], tensor: Annotated[Any, TV_TensorListScatterIntoExistingList_element_dtype], indices: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + indices = _ops.convert_to_tensor(indices, _dtypes.int32) + _inputs_flat = [input_handle, tensor, indices] + _attrs = ("element_dtype", _attr_element_dtype) + _result = _execute.execute(b"TensorListScatterIntoExistingList", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListScatterIntoExistingList", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListScatterV2_element_dtype = TypeVar("TV_TensorListScatterV2_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, 
_atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListScatterV2_shape_type = TypeVar("TV_TensorListScatterV2_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_scatter_v2(tensor: Annotated[Any, TV_TensorListScatterV2_element_dtype], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, TV_TensorListScatterV2_shape_type], num_elements: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a TensorList by indexing into a Tensor. + + Each member of the TensorList corresponds to one row of the input tensor, + specified by the given index (see `tf.gather`). + + tensor: The input tensor. + indices: The indices used to index into the list. + element_shape: The shape of the elements in the list (can be less specified than + the shape of the tensor). + num_elements: The size of the output list. Must be large enough to accommodate + the largest index in indices. If -1, the list is just large enough to include + the largest index in indices. + output_handle: The TensorList. + + Args: + tensor: A `Tensor`. + indices: A `Tensor` of type `int32`. + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + num_elements: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListScatterV2", name, tensor, indices, element_shape, + num_elements) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_scatter_v2_eager_fallback( + tensor, indices, element_shape, num_elements, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListScatterV2", tensor=tensor, indices=indices, + element_shape=element_shape, + num_elements=num_elements, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListScatterV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListScatterV2 = tf_export("raw_ops.TensorListScatterV2")(_ops.to_raw_op(tensor_list_scatter_v2)) + + +def tensor_list_scatter_v2_eager_fallback(tensor: Annotated[Any, TV_TensorListScatterV2_element_dtype], indices: Annotated[Any, _atypes.Int32], element_shape: Annotated[Any, TV_TensorListScatterV2_shape_type], num_elements: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + indices = _ops.convert_to_tensor(indices, _dtypes.int32) + num_elements = _ops.convert_to_tensor(num_elements, _dtypes.int32) + _inputs_flat = [tensor, indices, element_shape, num_elements] + _attrs = ("element_dtype", _attr_element_dtype, "shape_type", + _attr_shape_type) + _result = _execute.execute(b"TensorListScatterV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListScatterV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListSetItem_element_dtype = TypeVar("TV_TensorListSetItem_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, 
_atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_set_item(input_handle: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], item: Annotated[Any, TV_TensorListSetItem_element_dtype], resize_if_index_out_of_bounds:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Sets the index-th position of the list to contain the given tensor. + + input_handle: the list + index: the position in the list to which the tensor will be assigned + item: the element to be assigned to that position + output_handle: the new list, with the element in the proper position + + Args: + input_handle: A `Tensor` of type `variant`. + index: A `Tensor` of type `int32`. + item: A `Tensor`. + resize_if_index_out_of_bounds: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListSetItem", name, input_handle, index, item, + "resize_if_index_out_of_bounds", resize_if_index_out_of_bounds) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_set_item_eager_fallback( + input_handle, index, item, + resize_if_index_out_of_bounds=resize_if_index_out_of_bounds, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if resize_if_index_out_of_bounds is None: + resize_if_index_out_of_bounds = False + resize_if_index_out_of_bounds = _execute.make_bool(resize_if_index_out_of_bounds, "resize_if_index_out_of_bounds") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListSetItem", input_handle=input_handle, index=index, + item=item, + resize_if_index_out_of_bounds=resize_if_index_out_of_bounds, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "resize_if_index_out_of_bounds", + _op._get_attr_bool("resize_if_index_out_of_bounds")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListSetItem", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListSetItem = tf_export("raw_ops.TensorListSetItem")(_ops.to_raw_op(tensor_list_set_item)) + + +def tensor_list_set_item_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], item: Annotated[Any, TV_TensorListSetItem_element_dtype], resize_if_index_out_of_bounds: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if resize_if_index_out_of_bounds is None: + resize_if_index_out_of_bounds = False + resize_if_index_out_of_bounds = _execute.make_bool(resize_if_index_out_of_bounds, "resize_if_index_out_of_bounds") + _attr_element_dtype, (item,) = _execute.args_to_matching_eager([item], ctx, []) + input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant) + index = _ops.convert_to_tensor(index, _dtypes.int32) + _inputs_flat = [input_handle, index, item] + _attrs = ("element_dtype", _attr_element_dtype, + "resize_if_index_out_of_bounds", resize_if_index_out_of_bounds) + _result = _execute.execute(b"TensorListSetItem", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListSetItem", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + 
+TV_TensorListSplit_element_dtype = TypeVar("TV_TensorListSplit_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_TensorListSplit_shape_type = TypeVar("TV_TensorListSplit_shape_type", _atypes.Int32, _atypes.Int64) + +def tensor_list_split(tensor: Annotated[Any, TV_TensorListSplit_element_dtype], element_shape: Annotated[Any, TV_TensorListSplit_shape_type], lengths: Annotated[Any, _atypes.Int64], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Splits a tensor into a list. + + list[i] corresponds to lengths[i] tensors from the input tensor. + The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + + tensor: The input tensor. + element_shape: A shape compatible with that of elements in the tensor. + lengths: Vector of sizes of the 0th dimension of tensors in the list. + output_handle: The list. + + Args: + tensor: A `Tensor`. + element_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + lengths: A `Tensor` of type `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListSplit", name, tensor, element_shape, lengths) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_split_eager_fallback( + tensor, element_shape, lengths, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListSplit", tensor=tensor, element_shape=element_shape, + lengths=lengths, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "shape_type", _op._get_attr_type("shape_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListSplit", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListSplit = tf_export("raw_ops.TensorListSplit")(_ops.to_raw_op(tensor_list_split)) + + +def tensor_list_split_eager_fallback(tensor: Annotated[Any, TV_TensorListSplit_element_dtype], element_shape: Annotated[Any, TV_TensorListSplit_shape_type], lengths: Annotated[Any, _atypes.Int64], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_element_dtype, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _attr_shape_type, (element_shape,) = _execute.args_to_matching_eager([element_shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + lengths = _ops.convert_to_tensor(lengths, _dtypes.int64) + _inputs_flat = [tensor, element_shape, lengths] + _attrs = ("element_dtype", _attr_element_dtype, "shape_type", + _attr_shape_type) + _result = _execute.execute(b"TensorListSplit", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + 
"TensorListSplit", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TensorListStack_element_dtype = TypeVar("TV_TensorListStack_element_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tensor_list_stack(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListStack_element_dtype, num_elements:int=-1, name=None) -> Annotated[Any, TV_TensorListStack_element_dtype]: + r"""Stacks all tensors in the list. + + Requires that all tensors have the same shape. + + input_handle: the input list + tensor: the gathered result + num_elements: optional. If not -1, the number of elements in the list. + + Args: + input_handle: A `Tensor` of type `variant`. + element_shape: A `Tensor` of type `int32`. + element_dtype: A `tf.DType`. + num_elements: An optional `int`. Defaults to `-1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `element_dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TensorListStack", name, input_handle, element_shape, + "element_dtype", element_dtype, "num_elements", num_elements) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tensor_list_stack_eager_fallback( + input_handle, element_shape, element_dtype=element_dtype, + num_elements=num_elements, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + element_dtype = _execute.make_type(element_dtype, "element_dtype") + if num_elements is None: + num_elements = -1 + num_elements = _execute.make_int(num_elements, "num_elements") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TensorListStack", input_handle=input_handle, + element_shape=element_shape, + element_dtype=element_dtype, + num_elements=num_elements, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("element_dtype", _op._get_attr_type("element_dtype"), + "num_elements", _op._get_attr_int("num_elements")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TensorListStack", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TensorListStack = tf_export("raw_ops.TensorListStack")(_ops.to_raw_op(tensor_list_stack)) + + +def tensor_list_stack_eager_fallback(input_handle: Annotated[Any, _atypes.Variant], element_shape: Annotated[Any, _atypes.Int32], element_dtype: TV_TensorListStack_element_dtype, num_elements: int, name, ctx) -> Annotated[Any, TV_TensorListStack_element_dtype]: + element_dtype = _execute.make_type(element_dtype, "element_dtype") + if num_elements is None: + num_elements = -1 + num_elements = _execute.make_int(num_elements, "num_elements") + input_handle = 
_ops.convert_to_tensor(input_handle, _dtypes.variant) + element_shape = _ops.convert_to_tensor(element_shape, _dtypes.int32) + _inputs_flat = [input_handle, element_shape] + _attrs = ("element_dtype", element_dtype, "num_elements", num_elements) + _result = _execute.execute(b"TensorListStack", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TensorListStack", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_array_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d11841e1e6d04640feb65ddee358a986aa12b817 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_array_ops.py @@ -0,0 +1,525 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated +_RaggedCrossOutput = collections.namedtuple( + "RaggedCross", + ["output_values", "output_row_splits"]) + + +TV_RaggedCross_out_values_type = TypeVar("TV_RaggedCross_out_values_type", _atypes.Int64, _atypes.String) +TV_RaggedCross_out_row_splits_type = TypeVar("TV_RaggedCross_out_row_splits_type", _atypes.Int32, _atypes.Int64) + +def ragged_cross(ragged_values, ragged_row_splits, sparse_indices: Annotated[List[Any], _atypes.Int64], sparse_values, sparse_shape: Annotated[List[Any], _atypes.Int64], dense_inputs, input_order: str, hashed_output: bool, num_buckets: int, hash_key: int, out_values_type: TV_RaggedCross_out_values_type, out_row_splits_type: TV_RaggedCross_out_row_splits_type, name=None): + r"""Generates a feature cross from a list of tensors, and returns it as a +RaggedTensor. See `tf.ragged.cross` for more details. + + Args: + ragged_values: A list of `Tensor` objects with types from: `int64`, `string`. + The values tensor for each RaggedTensor input. + ragged_row_splits: A list of `Tensor` objects with types from: `int32`, `int64`. + The row_splits tensor for each RaggedTensor input. 
+ sparse_indices: A list of `Tensor` objects with type `int64`. + The indices tensor for each SparseTensor input. + sparse_values: A list of `Tensor` objects with types from: `int64`, `string`. + The values tensor for each SparseTensor input. + sparse_shape: A list with the same length as `sparse_indices` of `Tensor` objects with type `int64`. + The dense_shape tensor for each SparseTensor input. + dense_inputs: A list of `Tensor` objects with types from: `int64`, `string`. + The tf.Tensor inputs. + input_order: A `string`. + String specifying the tensor type for each input. The `i`th character in + this string specifies the type of the `i`th input, and is one of: 'R' (ragged), + 'D' (dense), or 'S' (sparse). This attr is used to ensure that the crossed + values are combined in the order of the inputs from the call to tf.ragged.cross. + hashed_output: A `bool`. + num_buckets: An `int` that is `>= 0`. + hash_key: An `int`. + out_values_type: A `tf.DType` from: `tf.int64, tf.string`. + out_row_splits_type: A `tf.DType` from: `tf.int32, tf.int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output_values, output_row_splits). + + output_values: A `Tensor` of type `out_values_type`. + output_row_splits: A `Tensor` of type `out_row_splits_type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedCross", name, ragged_values, ragged_row_splits, + sparse_indices, sparse_values, sparse_shape, dense_inputs, + "input_order", input_order, "hashed_output", hashed_output, + "num_buckets", num_buckets, "hash_key", hash_key, "out_values_type", + out_values_type, "out_row_splits_type", out_row_splits_type) + _result = _RaggedCrossOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ragged_cross_eager_fallback( + ragged_values, ragged_row_splits, sparse_indices, sparse_values, + sparse_shape, dense_inputs, input_order=input_order, + hashed_output=hashed_output, num_buckets=num_buckets, + hash_key=hash_key, out_values_type=out_values_type, + out_row_splits_type=out_row_splits_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sparse_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_indices' argument to " + "'ragged_cross' Op, not %r." % sparse_indices) + _attr_Nsparse = len(sparse_indices) + if not isinstance(sparse_shape, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_shape' argument to " + "'ragged_cross' Op, not %r." % sparse_shape) + if len(sparse_shape) != _attr_Nsparse: + raise ValueError( + "List argument 'sparse_shape' to 'ragged_cross' Op with length %d " + "must match length %d of argument 'sparse_indices'." 
% + (len(sparse_shape), _attr_Nsparse)) + input_order = _execute.make_str(input_order, "input_order") + hashed_output = _execute.make_bool(hashed_output, "hashed_output") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + hash_key = _execute.make_int(hash_key, "hash_key") + out_values_type = _execute.make_type(out_values_type, "out_values_type") + out_row_splits_type = _execute.make_type(out_row_splits_type, "out_row_splits_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedCross", ragged_values=ragged_values, + ragged_row_splits=ragged_row_splits, + sparse_indices=sparse_indices, + sparse_values=sparse_values, sparse_shape=sparse_shape, + dense_inputs=dense_inputs, input_order=input_order, + hashed_output=hashed_output, num_buckets=num_buckets, + hash_key=hash_key, out_values_type=out_values_type, + out_row_splits_type=out_row_splits_type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Nsparse", _op._get_attr_int("Nsparse"), "input_order", + _op.get_attr("input_order"), "hashed_output", + _op._get_attr_bool("hashed_output"), "num_buckets", + _op._get_attr_int("num_buckets"), "hash_key", + _op._get_attr_int("hash_key"), "ragged_values_types", + _op.get_attr("ragged_values_types"), "ragged_splits_types", + _op.get_attr("ragged_splits_types"), "sparse_values_types", + _op.get_attr("sparse_values_types"), "dense_types", + _op.get_attr("dense_types"), "out_values_type", + _op._get_attr_type("out_values_type"), "out_row_splits_type", + _op._get_attr_type("out_row_splits_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedCross", _inputs_flat, _attrs, _result) + _result = _RaggedCrossOutput._make(_result) + return _result + +RaggedCross = tf_export("raw_ops.RaggedCross")(_ops.to_raw_op(ragged_cross)) + + +def ragged_cross_eager_fallback(ragged_values, ragged_row_splits, sparse_indices: Annotated[List[Any], _atypes.Int64], sparse_values, sparse_shape: Annotated[List[Any], 
_atypes.Int64], dense_inputs, input_order: str, hashed_output: bool, num_buckets: int, hash_key: int, out_values_type: TV_RaggedCross_out_values_type, out_row_splits_type: TV_RaggedCross_out_row_splits_type, name, ctx): + if not isinstance(sparse_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_indices' argument to " + "'ragged_cross' Op, not %r." % sparse_indices) + _attr_Nsparse = len(sparse_indices) + if not isinstance(sparse_shape, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_shape' argument to " + "'ragged_cross' Op, not %r." % sparse_shape) + if len(sparse_shape) != _attr_Nsparse: + raise ValueError( + "List argument 'sparse_shape' to 'ragged_cross' Op with length %d " + "must match length %d of argument 'sparse_indices'." % + (len(sparse_shape), _attr_Nsparse)) + input_order = _execute.make_str(input_order, "input_order") + hashed_output = _execute.make_bool(hashed_output, "hashed_output") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + hash_key = _execute.make_int(hash_key, "hash_key") + out_values_type = _execute.make_type(out_values_type, "out_values_type") + out_row_splits_type = _execute.make_type(out_row_splits_type, "out_row_splits_type") + _attr_ragged_values_types, ragged_values = _execute.convert_to_mixed_eager_tensors(ragged_values, ctx) + _attr_ragged_splits_types, ragged_row_splits = _execute.convert_to_mixed_eager_tensors(ragged_row_splits, ctx) + _attr_sparse_values_types, sparse_values = _execute.convert_to_mixed_eager_tensors(sparse_values, ctx) + _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(dense_inputs, ctx) + sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64) + sparse_shape = _ops.convert_n_to_tensor(sparse_shape, _dtypes.int64) + _inputs_flat = list(ragged_values) + list(ragged_row_splits) + list(sparse_indices) + list(sparse_values) + list(sparse_shape) + list(dense_inputs) + _attrs = ("Nsparse", _attr_Nsparse, "input_order", 
input_order, + "hashed_output", hashed_output, "num_buckets", num_buckets, "hash_key", + hash_key, "ragged_values_types", _attr_ragged_values_types, + "ragged_splits_types", _attr_ragged_splits_types, "sparse_values_types", + _attr_sparse_values_types, "dense_types", _attr_dense_types, + "out_values_type", out_values_type, "out_row_splits_type", + out_row_splits_type) + _result = _execute.execute(b"RaggedCross", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RaggedCross", _inputs_flat, _attrs, _result) + _result = _RaggedCrossOutput._make(_result) + return _result + +_RaggedFillEmptyRowsOutput = collections.namedtuple( + "RaggedFillEmptyRows", + ["output_value_rowids", "output_values", "empty_row_indicator", "reverse_index_map"]) + + +TV_RaggedFillEmptyRows_T = TypeVar("TV_RaggedFillEmptyRows_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('ragged_fill_empty_rows') +def ragged_fill_empty_rows(value_rowids: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_RaggedFillEmptyRows_T], nrows: Annotated[Any, _atypes.Int64], default_value: Annotated[Any, TV_RaggedFillEmptyRows_T], name=None): + r"""TODO: add doc. + + Args: + value_rowids: A `Tensor` of type `int64`. + values: A `Tensor`. + nrows: A `Tensor` of type `int64`. + default_value: A `Tensor`. Must have the same type as `values`. + name: A name for the operation (optional). 
+ + Returns: + A tuple of `Tensor` objects (output_value_rowids, output_values, empty_row_indicator, reverse_index_map). + + output_value_rowids: A `Tensor` of type `int64`. + output_values: A `Tensor`. Has the same type as `values`. + empty_row_indicator: A `Tensor` of type `bool`. + reverse_index_map: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedFillEmptyRows", name, value_rowids, values, nrows, + default_value) + _result = _RaggedFillEmptyRowsOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_ragged_fill_empty_rows( + (value_rowids, values, nrows, default_value, name,), None) + if _result is not NotImplemented: + return _result + return ragged_fill_empty_rows_eager_fallback( + value_rowids, values, nrows, default_value, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + ragged_fill_empty_rows, (), dict(value_rowids=value_rowids, + values=values, nrows=nrows, + default_value=default_value, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_ragged_fill_empty_rows( + (value_rowids, values, nrows, default_value, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedFillEmptyRows", value_rowids=value_rowids, values=values, + nrows=nrows, default_value=default_value, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + ragged_fill_empty_rows, (), dict(value_rowids=value_rowids, + values=values, nrows=nrows, + default_value=default_value, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedFillEmptyRows", _inputs_flat, _attrs, _result) + _result = _RaggedFillEmptyRowsOutput._make(_result) + return _result + +RaggedFillEmptyRows = tf_export("raw_ops.RaggedFillEmptyRows")(_ops.to_raw_op(ragged_fill_empty_rows)) +_dispatcher_for_ragged_fill_empty_rows = ragged_fill_empty_rows._tf_type_based_dispatcher.Dispatch + + +def ragged_fill_empty_rows_eager_fallback(value_rowids: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_RaggedFillEmptyRows_T], nrows: Annotated[Any, _atypes.Int64], default_value: Annotated[Any, TV_RaggedFillEmptyRows_T], name, ctx): + _attr_T, _inputs_T = _execute.args_to_matching_eager([values, default_value], ctx, []) + (values, default_value) = _inputs_T + value_rowids = _ops.convert_to_tensor(value_rowids, _dtypes.int64) + nrows = _ops.convert_to_tensor(nrows, _dtypes.int64) + _inputs_flat = [value_rowids, values, nrows, default_value] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RaggedFillEmptyRows", 4, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RaggedFillEmptyRows", _inputs_flat, _attrs, _result) + _result = _RaggedFillEmptyRowsOutput._make(_result) + return _result + +_RaggedFillEmptyRowsGradOutput = collections.namedtuple( + "RaggedFillEmptyRowsGrad", + ["d_values", "d_default_value"]) + + 
+TV_RaggedFillEmptyRowsGrad_T = TypeVar("TV_RaggedFillEmptyRowsGrad_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('ragged_fill_empty_rows_grad') +def ragged_fill_empty_rows_grad(reverse_index_map: Annotated[Any, _atypes.Int64], grad_values: Annotated[Any, TV_RaggedFillEmptyRowsGrad_T], name=None): + r"""TODO: add doc. + + Args: + reverse_index_map: A `Tensor` of type `int64`. + grad_values: A `Tensor`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (d_values, d_default_value). + + d_values: A `Tensor`. Has the same type as `grad_values`. + d_default_value: A `Tensor`. Has the same type as `grad_values`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedFillEmptyRowsGrad", name, reverse_index_map, grad_values) + _result = _RaggedFillEmptyRowsGradOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_ragged_fill_empty_rows_grad( + (reverse_index_map, grad_values, name,), None) + if _result is not NotImplemented: + return _result + return ragged_fill_empty_rows_grad_eager_fallback( + reverse_index_map, grad_values, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + ragged_fill_empty_rows_grad, (), dict(reverse_index_map=reverse_index_map, + grad_values=grad_values, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_ragged_fill_empty_rows_grad( + (reverse_index_map, grad_values, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedFillEmptyRowsGrad", reverse_index_map=reverse_index_map, + grad_values=grad_values, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + ragged_fill_empty_rows_grad, (), dict(reverse_index_map=reverse_index_map, + grad_values=grad_values, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedFillEmptyRowsGrad", _inputs_flat, _attrs, _result) + _result = _RaggedFillEmptyRowsGradOutput._make(_result) + return _result + +RaggedFillEmptyRowsGrad = tf_export("raw_ops.RaggedFillEmptyRowsGrad")(_ops.to_raw_op(ragged_fill_empty_rows_grad)) +_dispatcher_for_ragged_fill_empty_rows_grad = ragged_fill_empty_rows_grad._tf_type_based_dispatcher.Dispatch + + +def ragged_fill_empty_rows_grad_eager_fallback(reverse_index_map: Annotated[Any, _atypes.Int64], grad_values: Annotated[Any, TV_RaggedFillEmptyRowsGrad_T], name, ctx): + _attr_T, (grad_values,) = _execute.args_to_matching_eager([grad_values], ctx, []) + reverse_index_map = _ops.convert_to_tensor(reverse_index_map, _dtypes.int64) + _inputs_flat = [reverse_index_map, grad_values] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RaggedFillEmptyRowsGrad", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "RaggedFillEmptyRowsGrad", _inputs_flat, _attrs, _result) + _result = _RaggedFillEmptyRowsGradOutput._make(_result) + return _result + +_RaggedGatherOutput = collections.namedtuple( + "RaggedGather", + ["output_nested_splits", "output_dense_values"]) + + +TV_RaggedGather_Tvalues = TypeVar("TV_RaggedGather_Tvalues", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_RaggedGather_Tindices = TypeVar("TV_RaggedGather_Tindices", _atypes.Int32, _atypes.Int64) +TV_RaggedGather_Tsplits = TypeVar("TV_RaggedGather_Tsplits", _atypes.Int32, _atypes.Int64) + +def ragged_gather(params_nested_splits: Annotated[List[Any], TV_RaggedGather_Tsplits], params_dense_values: Annotated[Any, TV_RaggedGather_Tvalues], indices: Annotated[Any, TV_RaggedGather_Tindices], OUTPUT_RAGGED_RANK: int, name=None): + r"""Gather ragged slices from `params` axis `0` according to `indices`. + + Outputs a `RaggedTensor` output composed from `output_dense_values` and + `output_nested_splits`, such that: + + ```python + output.shape = indices.shape + params.shape[1:] + output.ragged_rank = indices.shape.ndims + params.ragged_rank + output[i...j, d0...dn] = params[indices[i...j], d0...dn] + ``` + + where + + * `params = + ragged.from_nested_row_splits(params_dense_values, params_nested_splits)` + provides the values that should be gathered. + * `indices` ia a dense tensor with dtype `int32` or `int64`, indicating which + values should be gathered. 
+ * `output = + ragged.from_nested_row_splits(output_dense_values, output_nested_splits)` + is the output tensor. + + (Note: This c++ op is used to implement the higher-level python + `tf.ragged.gather` op, which also supports ragged indices.) + + Args: + params_nested_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + The `nested_row_splits` tensors that define the row-partitioning for the + `params` RaggedTensor input. + params_dense_values: A `Tensor`. + The `flat_values` for the `params` RaggedTensor. There was a terminology change + at the python level from dense_values to flat_values, so dense_values is the + deprecated name. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Indices in the outermost dimension of `params` of the values that should be + gathered. + OUTPUT_RAGGED_RANK: An `int` that is `>= 0`. + The ragged rank of the output RaggedTensor. `output_nested_splits` will contain + this number of `row_splits` tensors. This value should equal + `indices.shape.ndims + params.ragged_rank - 1`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output_nested_splits, output_dense_values). + + output_nested_splits: A list of `OUTPUT_RAGGED_RANK` `Tensor` objects with the same type as `params_nested_splits`. + output_dense_values: A `Tensor`. Has the same type as `params_dense_values`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedGather", name, params_nested_splits, params_dense_values, + indices, "OUTPUT_RAGGED_RANK", OUTPUT_RAGGED_RANK) + _result = _RaggedGatherOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ragged_gather_eager_fallback( + params_nested_splits, params_dense_values, indices, + OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(params_nested_splits, (list, tuple)): + raise TypeError( + "Expected list for 'params_nested_splits' argument to " + "'ragged_gather' Op, not %r." % params_nested_splits) + _attr_PARAMS_RAGGED_RANK = len(params_nested_splits) + OUTPUT_RAGGED_RANK = _execute.make_int(OUTPUT_RAGGED_RANK, "OUTPUT_RAGGED_RANK") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedGather", params_nested_splits=params_nested_splits, + params_dense_values=params_dense_values, + indices=indices, + OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tvalues", _op._get_attr_type("Tvalues"), "Tindices", + _op._get_attr_type("Tindices"), "Tsplits", + _op._get_attr_type("Tsplits"), "PARAMS_RAGGED_RANK", + _op._get_attr_int("PARAMS_RAGGED_RANK"), "OUTPUT_RAGGED_RANK", + _op._get_attr_int("OUTPUT_RAGGED_RANK")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedGather", _inputs_flat, _attrs, _result) + _result = [_result[:OUTPUT_RAGGED_RANK]] + _result[OUTPUT_RAGGED_RANK:] + _result = _RaggedGatherOutput._make(_result) + return _result + +RaggedGather = tf_export("raw_ops.RaggedGather")(_ops.to_raw_op(ragged_gather)) + + +def 
ragged_gather_eager_fallback(params_nested_splits: Annotated[List[Any], TV_RaggedGather_Tsplits], params_dense_values: Annotated[Any, TV_RaggedGather_Tvalues], indices: Annotated[Any, TV_RaggedGather_Tindices], OUTPUT_RAGGED_RANK: int, name, ctx): + if not isinstance(params_nested_splits, (list, tuple)): + raise TypeError( + "Expected list for 'params_nested_splits' argument to " + "'ragged_gather' Op, not %r." % params_nested_splits) + _attr_PARAMS_RAGGED_RANK = len(params_nested_splits) + OUTPUT_RAGGED_RANK = _execute.make_int(OUTPUT_RAGGED_RANK, "OUTPUT_RAGGED_RANK") + _attr_Tvalues, (params_dense_values,) = _execute.args_to_matching_eager([params_dense_values], ctx, []) + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tsplits, params_nested_splits = _execute.args_to_matching_eager(list(params_nested_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = list(params_nested_splits) + [params_dense_values, indices] + _attrs = ("Tvalues", _attr_Tvalues, "Tindices", _attr_Tindices, "Tsplits", + _attr_Tsplits, "PARAMS_RAGGED_RANK", _attr_PARAMS_RAGGED_RANK, + "OUTPUT_RAGGED_RANK", OUTPUT_RAGGED_RANK) + _result = _execute.execute(b"RaggedGather", OUTPUT_RAGGED_RANK + 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RaggedGather", _inputs_flat, _attrs, _result) + _result = [_result[:OUTPUT_RAGGED_RANK]] + _result[OUTPUT_RAGGED_RANK:] + _result = _RaggedGatherOutput._make(_result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_special_math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_special_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8c9e6a52091797ce68ada5ff9472befd1a12909c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_special_math_ops.py 
@@ -0,0 +1,975 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_BesselI0_T = TypeVar("TV_BesselI0_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_i0(x: Annotated[Any, TV_BesselI0_T], name=None) -> Annotated[Any, TV_BesselI0_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselI0", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_i0_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselI0", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselI0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselI0 = tf_export("raw_ops.BesselI0")(_ops.to_raw_op(bessel_i0)) + + +def bessel_i0_eager_fallback(x: Annotated[Any, TV_BesselI0_T], name, ctx) -> Annotated[Any, TV_BesselI0_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselI0", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselI0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselI0e_T = TypeVar("TV_BesselI0e_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_i0e(x: Annotated[Any, TV_BesselI0e_T], name=None) -> Annotated[Any, TV_BesselI0e_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselI0e", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_i0e_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselI0e", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselI0e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselI0e = tf_export("raw_ops.BesselI0e")(_ops.to_raw_op(bessel_i0e)) + + +def bessel_i0e_eager_fallback(x: Annotated[Any, TV_BesselI0e_T], name, ctx) -> Annotated[Any, TV_BesselI0e_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselI0e", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselI0e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselI1_T = TypeVar("TV_BesselI1_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_i1(x: Annotated[Any, TV_BesselI1_T], name=None) -> Annotated[Any, TV_BesselI1_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselI1", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_i1_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselI1", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselI1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselI1 = tf_export("raw_ops.BesselI1")(_ops.to_raw_op(bessel_i1)) + + +def bessel_i1_eager_fallback(x: Annotated[Any, TV_BesselI1_T], name, ctx) -> Annotated[Any, TV_BesselI1_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselI1", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselI1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselI1e_T = TypeVar("TV_BesselI1e_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_i1e(x: Annotated[Any, TV_BesselI1e_T], name=None) -> Annotated[Any, TV_BesselI1e_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselI1e", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_i1e_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselI1e", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselI1e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselI1e = tf_export("raw_ops.BesselI1e")(_ops.to_raw_op(bessel_i1e)) + + +def bessel_i1e_eager_fallback(x: Annotated[Any, TV_BesselI1e_T], name, ctx) -> Annotated[Any, TV_BesselI1e_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselI1e", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselI1e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselJ0_T = TypeVar("TV_BesselJ0_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_j0(x: Annotated[Any, TV_BesselJ0_T], name=None) -> Annotated[Any, TV_BesselJ0_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselJ0", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_j0_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselJ0", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselJ0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselJ0 = tf_export("raw_ops.BesselJ0")(_ops.to_raw_op(bessel_j0)) + + +def bessel_j0_eager_fallback(x: Annotated[Any, TV_BesselJ0_T], name, ctx) -> Annotated[Any, TV_BesselJ0_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselJ0", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselJ0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselJ1_T = TypeVar("TV_BesselJ1_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_j1(x: Annotated[Any, TV_BesselJ1_T], name=None) -> Annotated[Any, TV_BesselJ1_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselJ1", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_j1_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselJ1", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselJ1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselJ1 = tf_export("raw_ops.BesselJ1")(_ops.to_raw_op(bessel_j1)) + + +def bessel_j1_eager_fallback(x: Annotated[Any, TV_BesselJ1_T], name, ctx) -> Annotated[Any, TV_BesselJ1_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselJ1", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselJ1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselK0_T = TypeVar("TV_BesselK0_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_k0(x: Annotated[Any, TV_BesselK0_T], name=None) -> Annotated[Any, TV_BesselK0_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselK0", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_k0_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselK0", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselK0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselK0 = tf_export("raw_ops.BesselK0")(_ops.to_raw_op(bessel_k0)) + + +def bessel_k0_eager_fallback(x: Annotated[Any, TV_BesselK0_T], name, ctx) -> Annotated[Any, TV_BesselK0_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselK0", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselK0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselK0e_T = TypeVar("TV_BesselK0e_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_k0e(x: Annotated[Any, TV_BesselK0e_T], name=None) -> Annotated[Any, TV_BesselK0e_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselK0e", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_k0e_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselK0e", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselK0e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselK0e = tf_export("raw_ops.BesselK0e")(_ops.to_raw_op(bessel_k0e)) + + +def bessel_k0e_eager_fallback(x: Annotated[Any, TV_BesselK0e_T], name, ctx) -> Annotated[Any, TV_BesselK0e_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselK0e", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselK0e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselK1_T = TypeVar("TV_BesselK1_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_k1(x: Annotated[Any, TV_BesselK1_T], name=None) -> Annotated[Any, TV_BesselK1_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselK1", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_k1_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselK1", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselK1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselK1 = tf_export("raw_ops.BesselK1")(_ops.to_raw_op(bessel_k1)) + + +def bessel_k1_eager_fallback(x: Annotated[Any, TV_BesselK1_T], name, ctx) -> Annotated[Any, TV_BesselK1_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselK1", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselK1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselK1e_T = TypeVar("TV_BesselK1e_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_k1e(x: Annotated[Any, TV_BesselK1e_T], name=None) -> Annotated[Any, TV_BesselK1e_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselK1e", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_k1e_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselK1e", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselK1e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselK1e = tf_export("raw_ops.BesselK1e")(_ops.to_raw_op(bessel_k1e)) + + +def bessel_k1e_eager_fallback(x: Annotated[Any, TV_BesselK1e_T], name, ctx) -> Annotated[Any, TV_BesselK1e_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselK1e", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselK1e", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselY0_T = TypeVar("TV_BesselY0_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_y0(x: Annotated[Any, TV_BesselY0_T], name=None) -> Annotated[Any, TV_BesselY0_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselY0", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_y0_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselY0", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselY0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselY0 = tf_export("raw_ops.BesselY0")(_ops.to_raw_op(bessel_y0)) + + +def bessel_y0_eager_fallback(x: Annotated[Any, TV_BesselY0_T], name, ctx) -> Annotated[Any, TV_BesselY0_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselY0", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselY0", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BesselY1_T = TypeVar("TV_BesselY1_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def bessel_y1(x: Annotated[Any, TV_BesselY1_T], name=None) -> Annotated[Any, TV_BesselY1_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BesselY1", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bessel_y1_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BesselY1", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BesselY1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BesselY1 = tf_export("raw_ops.BesselY1")(_ops.to_raw_op(bessel_y1)) + + +def bessel_y1_eager_fallback(x: Annotated[Any, TV_BesselY1_T], name, ctx) -> Annotated[Any, TV_BesselY1_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"BesselY1", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BesselY1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Dawsn_T = TypeVar("TV_Dawsn_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def dawsn(x: Annotated[Any, TV_Dawsn_T], name=None) -> Annotated[Any, TV_Dawsn_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Dawsn", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dawsn_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Dawsn", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Dawsn", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Dawsn = tf_export("raw_ops.Dawsn")(_ops.to_raw_op(dawsn)) + + +def dawsn_eager_fallback(x: Annotated[Any, TV_Dawsn_T], name, ctx) -> Annotated[Any, TV_Dawsn_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Dawsn", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Dawsn", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Expint_T = TypeVar("TV_Expint_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def expint(x: Annotated[Any, TV_Expint_T], name=None) -> Annotated[Any, TV_Expint_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Expint", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return expint_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Expint", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Expint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Expint = tf_export("raw_ops.Expint")(_ops.to_raw_op(expint)) + + +def expint_eager_fallback(x: Annotated[Any, TV_Expint_T], name, ctx) -> Annotated[Any, TV_Expint_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Expint", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Expint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_FresnelCos_T = TypeVar("TV_FresnelCos_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def fresnel_cos(x: Annotated[Any, TV_FresnelCos_T], name=None) -> Annotated[Any, TV_FresnelCos_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FresnelCos", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return fresnel_cos_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FresnelCos", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FresnelCos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FresnelCos = tf_export("raw_ops.FresnelCos")(_ops.to_raw_op(fresnel_cos)) + + +def fresnel_cos_eager_fallback(x: Annotated[Any, TV_FresnelCos_T], name, ctx) -> Annotated[Any, TV_FresnelCos_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"FresnelCos", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FresnelCos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_FresnelSin_T = TypeVar("TV_FresnelSin_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def fresnel_sin(x: Annotated[Any, TV_FresnelSin_T], name=None) -> Annotated[Any, TV_FresnelSin_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FresnelSin", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return fresnel_sin_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FresnelSin", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FresnelSin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FresnelSin = tf_export("raw_ops.FresnelSin")(_ops.to_raw_op(fresnel_sin)) + + +def fresnel_sin_eager_fallback(x: Annotated[Any, TV_FresnelSin_T], name, ctx) -> Annotated[Any, TV_FresnelSin_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"FresnelSin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FresnelSin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Spence_T = TypeVar("TV_Spence_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def spence(x: Annotated[Any, TV_Spence_T], name=None) -> Annotated[Any, TV_Spence_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Spence", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return spence_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Spence", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Spence", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Spence = tf_export("raw_ops.Spence")(_ops.to_raw_op(spence)) + + +def spence_eager_fallback(x: Annotated[Any, TV_Spence_T], name, ctx) -> Annotated[Any, TV_Spence_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Spence", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Spence", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_string_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_string_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bfb9429bdf1732f2dcde16ab9445c1b3235801b1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_string_ops.py @@ -0,0 +1,2848 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_AsString_T = TypeVar("TV_AsString_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.as_string', 'as_string', v1=['dtypes.as_string', 'strings.as_string', 'as_string']) +@deprecated_endpoints('dtypes.as_string') +def as_string(input: Annotated[Any, TV_AsString_T], precision:int=-1, scientific:bool=False, shortest:bool=False, width:int=-1, fill:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Converts each entry in the given tensor to strings. + + Supports many numeric types and boolean. + + For Unicode, see the + [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text) + tutorial. 
+ + Examples: + + >>> tf.strings.as_string([3, 2]) + + >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() + array([b'3.14', b'2.72'], dtype=object) + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`, `variant`, `string`. + precision: An optional `int`. Defaults to `-1`. + The post-decimal precision to use for floating point numbers. + Only used if precision > -1. + scientific: An optional `bool`. Defaults to `False`. + Use scientific notation for floating point numbers. + shortest: An optional `bool`. Defaults to `False`. + Use shortest representation (either scientific or standard) for + floating point numbers. + width: An optional `int`. Defaults to `-1`. + Pad pre-decimal numbers to this width. + Applies to both floating point and integer numbers. + Only used if width > -1. + fill: An optional `string`. Defaults to `""`. + The value to pad if width > -1. If empty, pads with spaces. + Another typical value is '0'. String cannot be longer than 1 character. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AsString", name, input, "precision", precision, "scientific", + scientific, "shortest", shortest, "width", width, "fill", fill) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_as_string( + (input, precision, scientific, shortest, width, fill, name,), None) + if _result is not NotImplemented: + return _result + return as_string_eager_fallback( + input, precision=precision, scientific=scientific, + shortest=shortest, width=width, fill=fill, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + as_string, (), dict(input=input, precision=precision, + scientific=scientific, shortest=shortest, + width=width, fill=fill, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_as_string( + (input, precision, scientific, shortest, width, fill, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if precision is None: + precision = -1 + precision = _execute.make_int(precision, "precision") + if scientific is None: + scientific = False + scientific = _execute.make_bool(scientific, "scientific") + if shortest is None: + shortest = False + shortest = _execute.make_bool(shortest, "shortest") + if width is None: + width = -1 + width = _execute.make_int(width, "width") + if fill is None: + fill = "" + fill = _execute.make_str(fill, "fill") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AsString", input=input, precision=precision, scientific=scientific, + shortest=shortest, width=width, fill=fill, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + as_string, (), dict(input=input, precision=precision, + scientific=scientific, shortest=shortest, + width=width, fill=fill, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "precision", + _op._get_attr_int("precision"), "scientific", + _op._get_attr_bool("scientific"), "shortest", + _op._get_attr_bool("shortest"), "width", + _op._get_attr_int("width"), "fill", _op.get_attr("fill")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AsString", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AsString = tf_export("raw_ops.AsString")(_ops.to_raw_op(as_string)) +_dispatcher_for_as_string = as_string._tf_type_based_dispatcher.Dispatch + + +def as_string_eager_fallback(input: Annotated[Any, TV_AsString_T], precision: int, scientific: bool, shortest: bool, width: int, fill: str, name, ctx) -> Annotated[Any, _atypes.String]: + if precision is None: + precision = -1 + precision = _execute.make_int(precision, "precision") + if scientific is None: + scientific = False + scientific = _execute.make_bool(scientific, "scientific") + if shortest is None: + shortest = False + shortest = 
_execute.make_bool(shortest, "shortest") + if width is None: + width = -1 + width = _execute.make_int(width, "width") + if fill is None: + fill = "" + fill = _execute.make_str(fill, "fill") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.complex64, _dtypes.complex128, _dtypes.bool, _dtypes.variant, _dtypes.string, ]) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "precision", precision, "scientific", scientific, + "shortest", shortest, "width", width, "fill", fill) + _result = _execute.execute(b"AsString", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AsString", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.decode_base64', v1=['io.decode_base64', 'decode_base64']) +@deprecated_endpoints('decode_base64') +def decode_base64(input: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.String]: + r"""Decode web-safe base64-encoded strings. + + Input may or may not have padding at the end. See + [EncodeBase64](https://www.tensorflow.org/api_docs/python/tf/io/encode_base64) + for padding. Web-safe means that input must use - and _ instead of + and /. + + Args: + input: A `Tensor` of type `string`. Base64 strings to decode. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeBase64", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_decode_base64( + (input, name,), None) + if _result is not NotImplemented: + return _result + return decode_base64_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_base64, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_decode_base64( + (input, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeBase64", input=input, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_base64, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeBase64", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DecodeBase64 = tf_export("raw_ops.DecodeBase64")(_ops.to_raw_op(decode_base64)) +_dispatcher_for_decode_base64 = decode_base64._tf_type_based_dispatcher.Dispatch + + +def decode_base64_eager_fallback(input: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.String]: + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = None + _result = _execute.execute(b"DecodeBase64", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, 
name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodeBase64", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.encode_base64', v1=['io.encode_base64', 'encode_base64']) +@deprecated_endpoints('encode_base64') +def encode_base64(input: Annotated[Any, _atypes.String], pad:bool=False, name=None) -> Annotated[Any, _atypes.String]: + r"""Encode strings into web-safe base64 format. + + Refer to [this article](https://en.wikipedia.org/wiki/Base64) for more information on + base64 format. Base64 strings may have padding with '=' at the + end so that the encoded has length multiple of 4. See Padding section of the + link above. + + Web-safe means that the encoder uses - and _ instead of + and /. + + Args: + input: A `Tensor` of type `string`. Strings to be encoded. + pad: An optional `bool`. Defaults to `False`. + Bool whether padding is applied at the ends. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EncodeBase64", name, input, "pad", pad) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_encode_base64( + (input, pad, name,), None) + if _result is not NotImplemented: + return _result + return encode_base64_eager_fallback( + input, pad=pad, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + encode_base64, (), dict(input=input, pad=pad, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_encode_base64( + (input, pad, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if pad is None: + pad = False + pad = _execute.make_bool(pad, "pad") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EncodeBase64", input=input, pad=pad, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + encode_base64, (), dict(input=input, pad=pad, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("pad", _op._get_attr_bool("pad")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "EncodeBase64", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +EncodeBase64 = tf_export("raw_ops.EncodeBase64")(_ops.to_raw_op(encode_base64)) +_dispatcher_for_encode_base64 = encode_base64._tf_type_based_dispatcher.Dispatch + + +def encode_base64_eager_fallback(input: Annotated[Any, _atypes.String], pad: bool, name, ctx) -> Annotated[Any, _atypes.String]: + if pad is None: + pad = False + pad = _execute.make_bool(pad, "pad") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("pad", pad) + _result = _execute.execute(b"EncodeBase64", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "EncodeBase64", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def reduce_join(inputs: Annotated[Any, _atypes.String], reduction_indices: Annotated[Any, _atypes.Int32], keep_dims:bool=False, separator:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Joins a string Tensor across 
the given dimensions. + + Computes the string join across dimensions in the given string Tensor of shape + `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input + strings with the given separator (default: empty string). Negative indices are + counted backwards from the end, with `-1` being equivalent to `n - 1`. If + indices are not specified, joins across all dimensions beginning from `n - 1` + through `0`. + + For example: + + ```python + # tensor `a` is [["a", "b"], ["c", "d"]] + tf.reduce_join(a, 0) ==> ["ac", "bd"] + tf.reduce_join(a, 1) ==> ["ab", "cd"] + tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + tf.reduce_join(a, [0, 1]) ==> "acbd" + tf.reduce_join(a, [1, 0]) ==> "abcd" + tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + ``` + + Args: + inputs: A `Tensor` of type `string`. + The input to be joined. All reduced indices must have non-zero size. + reduction_indices: A `Tensor` of type `int32`. + The dimensions to reduce over. Dimensions are reduced in the + order specified. Omitting `reduction_indices` is equivalent to passing + `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. + keep_dims: An optional `bool`. Defaults to `False`. + If `True`, retain reduced dimensions with length `1`. + separator: An optional `string`. Defaults to `""`. + The separator to use when joining. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ReduceJoin", name, inputs, reduction_indices, "keep_dims", + keep_dims, "separator", separator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return reduce_join_eager_fallback( + inputs, reduction_indices, keep_dims=keep_dims, separator=separator, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ReduceJoin", inputs=inputs, reduction_indices=reduction_indices, + keep_dims=keep_dims, separator=separator, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "separator", + _op.get_attr("separator")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ReduceJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ReduceJoin = tf_export("raw_ops.ReduceJoin")(_ops.to_raw_op(reduce_join)) + + +def reduce_join_eager_fallback(inputs: Annotated[Any, _atypes.String], reduction_indices: Annotated[Any, _atypes.Int32], keep_dims: bool, separator: str, name, ctx) -> Annotated[Any, _atypes.String]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + inputs = _ops.convert_to_tensor(inputs, _dtypes.string) + reduction_indices = _ops.convert_to_tensor(reduction_indices, _dtypes.int32) + _inputs_flat = [inputs, 
reduction_indices] + _attrs = ("keep_dims", keep_dims, "separator", separator) + _result = _execute.execute(b"ReduceJoin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ReduceJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def regex_full_match(input: Annotated[Any, _atypes.String], pattern: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Check if the input matches the regex pattern. + + The input is a string tensor of any shape. The pattern is a scalar + string tensor which is applied to every element of the input tensor. + The boolean values (True or False) of the output tensor indicate + if the input matches the regex pattern provided. + + The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + + Examples: + + >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") + + >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") + + + Args: + input: A `Tensor` of type `string`. + A string tensor of the text to be processed. + pattern: A `Tensor` of type `string`. + A scalar string tensor containing the regular expression to match the input. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RegexFullMatch", name, input, pattern) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return regex_full_match_eager_fallback( + input, pattern, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RegexFullMatch", input=input, pattern=pattern, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RegexFullMatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RegexFullMatch = tf_export("raw_ops.RegexFullMatch")(_ops.to_raw_op(regex_full_match)) + + +def regex_full_match_eager_fallback(input: Annotated[Any, _atypes.String], pattern: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Bool]: + input = _ops.convert_to_tensor(input, _dtypes.string) + pattern = _ops.convert_to_tensor(pattern, _dtypes.string) + _inputs_flat = [input, pattern] + _attrs = None + _result = _execute.execute(b"RegexFullMatch", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RegexFullMatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def regex_replace(input: Annotated[Any, _atypes.String], pattern: Annotated[Any, _atypes.String], rewrite: Annotated[Any, _atypes.String], replace_global:bool=True, name=None) -> Annotated[Any, _atypes.String]: + r"""Replaces matches of the `pattern` regular expression in `input` with the +replacement string provided in `rewrite`. + + It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + + Args: + input: A `Tensor` of type `string`. The text to be processed. + pattern: A `Tensor` of type `string`. + The regular expression to be matched in the `input` strings. + rewrite: A `Tensor` of type `string`. + The rewrite string to be substituted for the `pattern` expression where it is + matched in the `input` strings. + replace_global: An optional `bool`. Defaults to `True`. 
+ If True, the replacement is global (that is, all matches of the `pattern` regular + expression in each input string are rewritten), otherwise the `rewrite` + substitution is only made for the first `pattern` match. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RegexReplace", name, input, pattern, rewrite, "replace_global", + replace_global) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return regex_replace_eager_fallback( + input, pattern, rewrite, replace_global=replace_global, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if replace_global is None: + replace_global = True + replace_global = _execute.make_bool(replace_global, "replace_global") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RegexReplace", input=input, pattern=pattern, rewrite=rewrite, + replace_global=replace_global, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("replace_global", _op._get_attr_bool("replace_global")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RegexReplace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RegexReplace = tf_export("raw_ops.RegexReplace")(_ops.to_raw_op(regex_replace)) + + +def regex_replace_eager_fallback(input: Annotated[Any, _atypes.String], pattern: Annotated[Any, _atypes.String], rewrite: Annotated[Any, _atypes.String], replace_global: bool, name, ctx) -> Annotated[Any, _atypes.String]: + if replace_global is None: + replace_global = True + replace_global = _execute.make_bool(replace_global, "replace_global") + input = _ops.convert_to_tensor(input, 
_dtypes.string) + pattern = _ops.convert_to_tensor(pattern, _dtypes.string) + rewrite = _ops.convert_to_tensor(rewrite, _dtypes.string) + _inputs_flat = [input, pattern, rewrite] + _attrs = ("replace_global", replace_global) + _result = _execute.execute(b"RegexReplace", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RegexReplace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def static_regex_full_match(input: Annotated[Any, _atypes.String], pattern: str, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Check if the input matches the regex pattern. + + The input is a string tensor of any shape. The pattern is the + regular expression to be matched with every element of the input tensor. + The boolean values (True or False) of the output tensor indicate + if the input matches the regex pattern provided. + + The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + + Args: + input: A `Tensor` of type `string`. + A string tensor of the text to be processed. + pattern: A `string`. The regular expression to match the input. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StaticRegexFullMatch", name, input, "pattern", pattern) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return static_regex_full_match_eager_fallback( + input, pattern=pattern, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ pattern = _execute.make_str(pattern, "pattern") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StaticRegexFullMatch", input=input, pattern=pattern, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("pattern", _op.get_attr("pattern")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StaticRegexFullMatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StaticRegexFullMatch = tf_export("raw_ops.StaticRegexFullMatch")(_ops.to_raw_op(static_regex_full_match)) + + +def static_regex_full_match_eager_fallback(input: Annotated[Any, _atypes.String], pattern: str, name, ctx) -> Annotated[Any, _atypes.Bool]: + pattern = _execute.make_str(pattern, "pattern") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("pattern", pattern) + _result = _execute.execute(b"StaticRegexFullMatch", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StaticRegexFullMatch", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def static_regex_replace(input: Annotated[Any, _atypes.String], pattern: str, rewrite: str, replace_global:bool=True, name=None) -> Annotated[Any, _atypes.String]: + r"""Replaces the match of pattern in input with rewrite. + + It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + + Args: + input: A `Tensor` of type `string`. The text to be processed. + pattern: A `string`. The regular expression to match the input. + rewrite: A `string`. The rewrite to be applied to the matched expression. + replace_global: An optional `bool`. Defaults to `True`. + If True, the replacement is global, otherwise the replacement + is done only on the first match. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StaticRegexReplace", name, input, "pattern", pattern, + "rewrite", rewrite, "replace_global", replace_global) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return static_regex_replace_eager_fallback( + input, pattern=pattern, rewrite=rewrite, + replace_global=replace_global, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + pattern = _execute.make_str(pattern, "pattern") + rewrite = _execute.make_str(rewrite, "rewrite") + if replace_global is None: + replace_global = True + replace_global = _execute.make_bool(replace_global, "replace_global") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StaticRegexReplace", input=input, pattern=pattern, rewrite=rewrite, + replace_global=replace_global, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("pattern", _op.get_attr("pattern"), "rewrite", + _op.get_attr("rewrite"), "replace_global", + _op._get_attr_bool("replace_global")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StaticRegexReplace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StaticRegexReplace = tf_export("raw_ops.StaticRegexReplace")(_ops.to_raw_op(static_regex_replace)) + + +def static_regex_replace_eager_fallback(input: Annotated[Any, _atypes.String], pattern: str, rewrite: str, replace_global: bool, name, ctx) -> Annotated[Any, _atypes.String]: + pattern = _execute.make_str(pattern, "pattern") + rewrite = _execute.make_str(rewrite, "rewrite") + if replace_global is None: + replace_global = True + replace_global = _execute.make_bool(replace_global, "replace_global") + input = _ops.convert_to_tensor(input, 
_dtypes.string) + _inputs_flat = [input] + _attrs = ("pattern", pattern, "rewrite", rewrite, "replace_global", + replace_global) + _result = _execute.execute(b"StaticRegexReplace", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StaticRegexReplace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def string_format(inputs, template:str="%s", placeholder:str="%s", summarize:int=3, name=None) -> Annotated[Any, _atypes.String]: + r"""Formats a string template using a list of tensors. + + Formats a string template using a list of tensors, pretty-printing tensor summaries. + + Args: + inputs: A list of `Tensor` objects. + The list of tensors to format into the placeholder string. + template: An optional `string`. Defaults to `"%s"`. + A string, the template to format tensor summaries into. + placeholder: An optional `string`. Defaults to `"%s"`. + A string, at each placeholder in the template a subsequent tensor summary will be inserted. + summarize: An optional `int`. Defaults to `3`. + When formatting the tensor summaries print the first and last summarize entries of each tensor dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringFormat", name, inputs, "template", template, + "placeholder", placeholder, "summarize", summarize) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_format_eager_fallback( + inputs, template=template, placeholder=placeholder, + summarize=summarize, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if template is None: + template = "%s" + template = _execute.make_str(template, "template") + if placeholder is None: + placeholder = "%s" + placeholder = _execute.make_str(placeholder, "placeholder") + if summarize is None: + summarize = 3 + summarize = _execute.make_int(summarize, "summarize") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringFormat", inputs=inputs, template=template, + placeholder=placeholder, summarize=summarize, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op.get_attr("T"), "template", _op.get_attr("template"), + "placeholder", _op.get_attr("placeholder"), "summarize", + _op._get_attr_int("summarize")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringFormat", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringFormat = tf_export("raw_ops.StringFormat")(_ops.to_raw_op(string_format)) + + +def string_format_eager_fallback(inputs, template: str, placeholder: str, summarize: int, name, ctx) -> Annotated[Any, _atypes.String]: + if template is None: + template = "%s" + template = _execute.make_str(template, "template") + if placeholder is None: + placeholder = "%s" + placeholder = _execute.make_str(placeholder, "placeholder") + if summarize is None: + summarize = 3 + summarize = _execute.make_int(summarize, "summarize") + _attr_T, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = list(inputs) + _attrs = ("T", _attr_T, "template", template, "placeholder", placeholder, + "summarize", summarize) + _result = _execute.execute(b"StringFormat", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringFormat", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def string_join(inputs: Annotated[List[Any], _atypes.String], separator:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Joins the strings in the given 
list of string tensors into one tensor; + + with the given separator (default is an empty separator). + + Examples: + + >>> s = ["hello", "world", "tensorflow"] + >>> tf.strings.join(s, " ") + + + Args: + inputs: A list of `Tensor` objects with type `string`. + A list of string tensors. The tensors must all have the same shape, + or be scalars. Scalars may be mixed in; these will be broadcast to the shape + of non-scalar inputs. + separator: An optional `string`. Defaults to `""`. + string, an optional join separator. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringJoin", name, inputs, "separator", separator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_join_eager_fallback( + inputs, separator=separator, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'string_join' Op, not %r." 
% inputs) + _attr_N = len(inputs) + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringJoin", inputs=inputs, separator=separator, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "separator", + _op.get_attr("separator")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringJoin = tf_export("raw_ops.StringJoin")(_ops.to_raw_op(string_join)) + + +def string_join_eager_fallback(inputs: Annotated[List[Any], _atypes.String], separator: str, name, ctx) -> Annotated[Any, _atypes.String]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'string_join' Op, not %r." % inputs) + _attr_N = len(inputs) + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "separator", separator) + _result = _execute.execute(b"StringJoin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def string_length(input: Annotated[Any, _atypes.String], unit:str="BYTE", name=None) -> Annotated[Any, _atypes.Int32]: + r"""String lengths of `input`. + + Computes the length of each string given in the input tensor. + + >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + >>> tf.strings.length(strings).numpy() # default counts bytes + array([ 5, 10, 4], dtype=int32) + >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() + array([ 5, 10, 1], dtype=int32) + + Args: + input: A `Tensor` of type `string`. 
+ The strings for which to compute the length for each element. + unit: An optional `string` from: `"BYTE", "UTF8_CHAR"`. Defaults to `"BYTE"`. + The unit that is counted to compute string length. One of: `"BYTE"` (for + the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 + encoded Unicode code points in each string). Results are undefined + if `unit=UTF8_CHAR` and the `input` strings do not contain structurally + valid UTF-8. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringLength", name, input, "unit", unit) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_length_eager_fallback( + input, unit=unit, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if unit is None: + unit = "BYTE" + unit = _execute.make_str(unit, "unit") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringLength", input=input, unit=unit, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("unit", _op.get_attr("unit")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringLength", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringLength = tf_export("raw_ops.StringLength")(_ops.to_raw_op(string_length)) + + +def string_length_eager_fallback(input: Annotated[Any, _atypes.String], unit: str, name, ctx) -> Annotated[Any, _atypes.Int32]: + if unit is None: + unit = "BYTE" + unit = _execute.make_str(unit, "unit") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("unit", unit) + _result = _execute.execute(b"StringLength", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringLength", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.lower') +def string_lower(input: Annotated[Any, _atypes.String], encoding:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Converts all uppercase characters into their respective lowercase replacements. + + Example: + + >>> tf.strings.lower("CamelCase string and ALL CAPS") + + + Args: + input: A `Tensor` of type `string`. The input to be lower-cased. + encoding: An optional `string`. Defaults to `""`. + Character encoding of `input`. Allowed values are '' and 'utf-8'. + Value '' is interpreted as ASCII. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringLower", name, input, "encoding", encoding) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_string_lower( + (input, encoding, name,), None) + if _result is not NotImplemented: + return _result + return string_lower_eager_fallback( + input, encoding=encoding, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_lower, (), dict(input=input, encoding=encoding, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_string_lower( + (input, encoding, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if encoding is None: + encoding = "" + encoding = _execute.make_str(encoding, "encoding") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringLower", input=input, encoding=encoding, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_lower, (), dict(input=input, encoding=encoding, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("encoding", _op.get_attr("encoding")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringLower", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringLower = tf_export("raw_ops.StringLower")(_ops.to_raw_op(string_lower)) +_dispatcher_for_string_lower = string_lower._tf_type_based_dispatcher.Dispatch + + +def string_lower_eager_fallback(input: Annotated[Any, _atypes.String], encoding: str, name, ctx) -> Annotated[Any, _atypes.String]: + if encoding is None: + encoding = "" + encoding = _execute.make_str(encoding, "encoding") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("encoding", encoding) + _result = _execute.execute(b"StringLower", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringLower", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_StringNGramsOutput = collections.namedtuple( + "StringNGrams", + ["ngrams", "ngrams_splits"]) + + +TV_StringNGrams_Tsplits = TypeVar("TV_StringNGrams_Tsplits", _atypes.Int32, _atypes.Int64) + +def string_n_grams(data: Annotated[Any, _atypes.String], data_splits: Annotated[Any, TV_StringNGrams_Tsplits], separator: str, ngram_widths, left_pad: str, right_pad: str, pad_width: int, preserve_short_sequences: bool, name=None): + r"""Creates ngrams from ragged string data. 
+ + This op accepts a ragged tensor with 1 ragged dimension containing only + strings and outputs a ragged tensor with 1 ragged dimension containing ngrams + of that string, joined along the innermost axis. + + Args: + data: A `Tensor` of type `string`. + The values tensor of the ragged string tensor to make ngrams out of. Must be a + 1D string tensor. + data_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The splits tensor of the ragged string tensor to make ngrams out of. + separator: A `string`. + The string to append between elements of the token. Use "" for no separator. + ngram_widths: A list of `ints`. The sizes of the ngrams to create. + left_pad: A `string`. + The string to use to pad the left side of the ngram sequence. Only used if + pad_width != 0. + right_pad: A `string`. + The string to use to pad the right side of the ngram sequence. Only used if + pad_width != 0. + pad_width: An `int`. + The number of padding elements to add to each side of each + sequence. Note that padding will never be greater than 'ngram_widths'-1 + regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` + elements. + preserve_short_sequences: A `bool`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (ngrams, ngrams_splits). + + ngrams: A `Tensor` of type `string`. + ngrams_splits: A `Tensor`. Has the same type as `data_splits`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringNGrams", name, data, data_splits, "separator", separator, + "ngram_widths", ngram_widths, "left_pad", left_pad, "right_pad", + right_pad, "pad_width", pad_width, "preserve_short_sequences", + preserve_short_sequences) + _result = _StringNGramsOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_n_grams_eager_fallback( + data, data_splits, separator=separator, ngram_widths=ngram_widths, + left_pad=left_pad, right_pad=right_pad, pad_width=pad_width, + preserve_short_sequences=preserve_short_sequences, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + separator = _execute.make_str(separator, "separator") + if not isinstance(ngram_widths, (list, tuple)): + raise TypeError( + "Expected list for 'ngram_widths' argument to " + "'string_n_grams' Op, not %r." 
% ngram_widths) + ngram_widths = [_execute.make_int(_i, "ngram_widths") for _i in ngram_widths] + left_pad = _execute.make_str(left_pad, "left_pad") + right_pad = _execute.make_str(right_pad, "right_pad") + pad_width = _execute.make_int(pad_width, "pad_width") + preserve_short_sequences = _execute.make_bool(preserve_short_sequences, "preserve_short_sequences") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringNGrams", data=data, data_splits=data_splits, + separator=separator, ngram_widths=ngram_widths, + left_pad=left_pad, right_pad=right_pad, + pad_width=pad_width, + preserve_short_sequences=preserve_short_sequences, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("separator", _op.get_attr("separator"), "ngram_widths", + _op.get_attr("ngram_widths"), "left_pad", + _op.get_attr("left_pad"), "right_pad", + _op.get_attr("right_pad"), "pad_width", + _op._get_attr_int("pad_width"), "preserve_short_sequences", + _op._get_attr_bool("preserve_short_sequences"), "Tsplits", + _op._get_attr_type("Tsplits")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringNGrams", _inputs_flat, _attrs, _result) + _result = _StringNGramsOutput._make(_result) + return _result + +StringNGrams = tf_export("raw_ops.StringNGrams")(_ops.to_raw_op(string_n_grams)) + + +def string_n_grams_eager_fallback(data: Annotated[Any, _atypes.String], data_splits: Annotated[Any, TV_StringNGrams_Tsplits], separator: str, ngram_widths, left_pad: str, right_pad: str, pad_width: int, preserve_short_sequences: bool, name, ctx): + separator = _execute.make_str(separator, "separator") + if not isinstance(ngram_widths, (list, tuple)): + raise TypeError( + "Expected list for 'ngram_widths' argument to " + "'string_n_grams' Op, not %r." 
% ngram_widths) + ngram_widths = [_execute.make_int(_i, "ngram_widths") for _i in ngram_widths] + left_pad = _execute.make_str(left_pad, "left_pad") + right_pad = _execute.make_str(right_pad, "right_pad") + pad_width = _execute.make_int(pad_width, "pad_width") + preserve_short_sequences = _execute.make_bool(preserve_short_sequences, "preserve_short_sequences") + _attr_Tsplits, (data_splits,) = _execute.args_to_matching_eager([data_splits], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + data = _ops.convert_to_tensor(data, _dtypes.string) + _inputs_flat = [data, data_splits] + _attrs = ("separator", separator, "ngram_widths", ngram_widths, "left_pad", + left_pad, "right_pad", right_pad, "pad_width", pad_width, + "preserve_short_sequences", preserve_short_sequences, "Tsplits", + _attr_Tsplits) + _result = _execute.execute(b"StringNGrams", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringNGrams", _inputs_flat, _attrs, _result) + _result = _StringNGramsOutput._make(_result) + return _result + +_StringSplitOutput = collections.namedtuple( + "StringSplit", + ["indices", "values", "shape"]) + + +def string_split(input: Annotated[Any, _atypes.String], delimiter: Annotated[Any, _atypes.String], skip_empty:bool=True, name=None): + r"""Split elements of `input` based on `delimiter` into a `SparseTensor`. + + Let N be the size of source (typically N will be the batch size). Split each + element of `input` based on `delimiter` and return a `SparseTensor` + containing the splitted tokens. Empty tokens are ignored. + + `delimiter` can be empty, or a string of split characters. If `delimiter` is an + empty string, each element of `input` is split into individual single-byte + character strings, including splitting of UTF-8 multibyte sequences. Otherwise + every character of `delimiter` is a potential split point. 
+ + For example: + N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output + will be + + indices = [0, 0; + 0, 1; + 1, 0; + 1, 1; + 1, 2] + shape = [2, 3] + values = ['hello', 'world', 'a', 'b', 'c'] + + Args: + input: A `Tensor` of type `string`. 1-D. Strings to split. + delimiter: A `Tensor` of type `string`. + 0-D. Delimiter characters (bytes), or empty string. + skip_empty: An optional `bool`. Defaults to `True`. + A `bool`. If `True`, skip the empty strings from the result. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (indices, values, shape). + + indices: A `Tensor` of type `int64`. + values: A `Tensor` of type `string`. + shape: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringSplit", name, input, delimiter, "skip_empty", skip_empty) + _result = _StringSplitOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_split_eager_fallback( + input, delimiter, skip_empty=skip_empty, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if skip_empty is None: + skip_empty = True + skip_empty = _execute.make_bool(skip_empty, "skip_empty") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringSplit", input=input, delimiter=delimiter, + skip_empty=skip_empty, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("skip_empty", _op._get_attr_bool("skip_empty")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringSplit", _inputs_flat, _attrs, _result) + _result = _StringSplitOutput._make(_result) + return _result + +StringSplit = tf_export("raw_ops.StringSplit")(_ops.to_raw_op(string_split)) + + +def string_split_eager_fallback(input: Annotated[Any, _atypes.String], delimiter: Annotated[Any, _atypes.String], skip_empty: bool, name, ctx): + if skip_empty is None: + skip_empty = True + skip_empty = _execute.make_bool(skip_empty, "skip_empty") + input = _ops.convert_to_tensor(input, _dtypes.string) + delimiter = _ops.convert_to_tensor(delimiter, _dtypes.string) + _inputs_flat = [input, delimiter] + _attrs = ("skip_empty", skip_empty) + _result = _execute.execute(b"StringSplit", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringSplit", _inputs_flat, _attrs, _result) + _result = _StringSplitOutput._make(_result) + return _result + +_StringSplitV2Output = collections.namedtuple( + "StringSplitV2", + ["indices", "values", "shape"]) + + +def string_split_v2(input: Annotated[Any, _atypes.String], sep: Annotated[Any, _atypes.String], maxsplit:int=-1, name=None): + r"""Split elements of `source` based on `sep` into a `SparseTensor`. + + Let N be the size of source (typically N will be the batch size). Split each + element of `source` based on `sep` and return a `SparseTensor` + containing the split tokens. Empty tokens are ignored. 
+ + For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', + then the output will be + ``` + st.indices = [0, 0; + 0, 1; + 1, 0; + 1, 1; + 1, 2] + st.shape = [2, 3] + st.values = ['hello', 'world', 'a', 'b', 'c'] + ``` + + If `sep` is given, consecutive delimiters are not grouped together and are + deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and + sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty + string, consecutive whitespace are regarded as a single separator, and the + result will contain no empty strings at the startor end if the string has + leading or trailing whitespace. + + Note that the above mentioned behavior matches python's str.split. + + Args: + input: A `Tensor` of type `string`. + `1-D` string `Tensor`, the strings to split. + sep: A `Tensor` of type `string`. + `0-D` string `Tensor`, the delimiter character. + maxsplit: An optional `int`. Defaults to `-1`. + An `int`. If `maxsplit > 0`, limit of the split of the result. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (indices, values, shape). + + indices: A `Tensor` of type `int64`. + values: A `Tensor` of type `string`. + shape: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringSplitV2", name, input, sep, "maxsplit", maxsplit) + _result = _StringSplitV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_split_v2_eager_fallback( + input, sep, maxsplit=maxsplit, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if maxsplit is None: + maxsplit = -1 + maxsplit = _execute.make_int(maxsplit, "maxsplit") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringSplitV2", input=input, sep=sep, maxsplit=maxsplit, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("maxsplit", _op._get_attr_int("maxsplit")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringSplitV2", _inputs_flat, _attrs, _result) + _result = _StringSplitV2Output._make(_result) + return _result + +StringSplitV2 = tf_export("raw_ops.StringSplitV2")(_ops.to_raw_op(string_split_v2)) + + +def string_split_v2_eager_fallback(input: Annotated[Any, _atypes.String], sep: Annotated[Any, _atypes.String], maxsplit: int, name, ctx): + if maxsplit is None: + maxsplit = -1 + maxsplit = _execute.make_int(maxsplit, "maxsplit") + input = _ops.convert_to_tensor(input, _dtypes.string) + sep = _ops.convert_to_tensor(sep, _dtypes.string) + _inputs_flat = [input, sep] + _attrs = ("maxsplit", maxsplit) + _result = _execute.execute(b"StringSplitV2", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringSplitV2", _inputs_flat, _attrs, _result) + _result = _StringSplitV2Output._make(_result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.strip', v1=['strings.strip', 'string_strip']) +@deprecated_endpoints('string_strip') +def string_strip(input: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.String]: + r"""Strip leading and trailing whitespaces from the Tensor. + + Examples: + + >>> tf.strings.strip(["\nTensorFlow", " The python library "]).numpy() + array([b'TensorFlow', b'The python library'], dtype=object) + + Args: + input: A `Tensor` of type `string`. A string `Tensor` of any shape. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringStrip", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_string_strip( + (input, name,), None) + if _result is not NotImplemented: + return _result + return string_strip_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_strip, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_string_strip( + (input, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringStrip", input=input, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_strip, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringStrip", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringStrip = tf_export("raw_ops.StringStrip")(_ops.to_raw_op(string_strip)) +_dispatcher_for_string_strip = string_strip._tf_type_based_dispatcher.Dispatch + + +def string_strip_eager_fallback(input: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.String]: + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = None + _result = _execute.execute(b"StringStrip", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "StringStrip", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def string_to_hash_bucket(string_tensor: Annotated[Any, _atypes.String], num_buckets: int, name=None) -> Annotated[Any, _atypes.Int64]: + r"""Converts each string in the input Tensor to its hash mod by a number of buckets. + + The hash function is deterministic on the content of the string within the + process. + + Note that the hash function may change from time to time. + This functionality will be deprecated and it's recommended to use + `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. + + Args: + string_tensor: A `Tensor` of type `string`. + num_buckets: An `int` that is `>= 1`. The number of buckets. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringToHashBucket", name, string_tensor, "num_buckets", + num_buckets) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_to_hash_bucket_eager_fallback( + string_tensor, num_buckets=num_buckets, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_buckets = _execute.make_int(num_buckets, "num_buckets") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringToHashBucket", string_tensor=string_tensor, + num_buckets=num_buckets, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_buckets", _op._get_attr_int("num_buckets")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringToHashBucket", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringToHashBucket = tf_export("raw_ops.StringToHashBucket")(_ops.to_raw_op(string_to_hash_bucket)) + + +def string_to_hash_bucket_eager_fallback(string_tensor: Annotated[Any, _atypes.String], num_buckets: int, name, ctx) -> Annotated[Any, _atypes.Int64]: + num_buckets = _execute.make_int(num_buckets, "num_buckets") + string_tensor = _ops.convert_to_tensor(string_tensor, _dtypes.string) + _inputs_flat = [string_tensor] + _attrs = ("num_buckets", num_buckets) + _result = _execute.execute(b"StringToHashBucket", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringToHashBucket", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.to_hash_bucket_fast', v1=['strings.to_hash_bucket_fast', 'string_to_hash_bucket_fast']) +@deprecated_endpoints('string_to_hash_bucket_fast') +def string_to_hash_bucket_fast(input: Annotated[Any, _atypes.String], num_buckets: int, name=None) -> Annotated[Any, _atypes.Int64]: + r"""Converts each string in the input Tensor to its hash mod by a number of buckets. + + The hash function is deterministic on the content of the string within the + process and will never change. However, it is not suitable for cryptography. + This function may be used when CPU time is scarce and inputs are trusted or + unimportant. 
There is a risk of adversaries constructing inputs that all hash + to the same bucket. To prevent this problem, use a strong hash function with + `tf.string_to_hash_bucket_strong`. + + Examples: + + >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() + array([0, 2, 2]) + + Args: + input: A `Tensor` of type `string`. The strings to assign a hash bucket. + num_buckets: An `int` that is `>= 1`. The number of buckets. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringToHashBucketFast", name, input, "num_buckets", + num_buckets) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_string_to_hash_bucket_fast( + (input, num_buckets, name,), None) + if _result is not NotImplemented: + return _result + return string_to_hash_bucket_fast_eager_fallback( + input, num_buckets=num_buckets, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_to_hash_bucket_fast, (), dict(input=input, + num_buckets=num_buckets, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_string_to_hash_bucket_fast( + (input, num_buckets, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ num_buckets = _execute.make_int(num_buckets, "num_buckets") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringToHashBucketFast", input=input, num_buckets=num_buckets, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_to_hash_bucket_fast, (), dict(input=input, + num_buckets=num_buckets, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_buckets", _op._get_attr_int("num_buckets")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringToHashBucketFast", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringToHashBucketFast = tf_export("raw_ops.StringToHashBucketFast")(_ops.to_raw_op(string_to_hash_bucket_fast)) +_dispatcher_for_string_to_hash_bucket_fast = string_to_hash_bucket_fast._tf_type_based_dispatcher.Dispatch + + +def string_to_hash_bucket_fast_eager_fallback(input: Annotated[Any, _atypes.String], num_buckets: int, name, ctx) -> Annotated[Any, _atypes.Int64]: + num_buckets = _execute.make_int(num_buckets, "num_buckets") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("num_buckets", num_buckets) + _result = _execute.execute(b"StringToHashBucketFast", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringToHashBucketFast", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.to_hash_bucket_strong', v1=['strings.to_hash_bucket_strong', 'string_to_hash_bucket_strong']) +@deprecated_endpoints('string_to_hash_bucket_strong') +def string_to_hash_bucket_strong(input: Annotated[Any, _atypes.String], num_buckets: int, key, name=None) -> Annotated[Any, _atypes.Int64]: + r"""Converts each string in the 
input Tensor to its hash mod by a number of buckets. + + The hash function is deterministic on the content of the string within the + process. The hash function is a keyed hash function, where attribute `key` + defines the key of the hash function. `key` is an array of 2 elements. + + A strong hash is important when inputs may be malicious, e.g. URLs with + additional components. Adversaries could try to make their inputs hash to the + same bucket for a denial-of-service attack or to skew the results. A strong + hash can be used to make it difficult to find inputs with a skewed hash value + distribution over buckets. This requires that the hash function is + seeded by a high-entropy (random) "key" unknown to the adversary. + + The additional robustness comes at a cost of roughly 4x higher compute + time than `tf.string_to_hash_bucket_fast`. + + Examples: + + >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + array([2, 0]) + + Args: + input: A `Tensor` of type `string`. The strings to assign a hash bucket. + num_buckets: An `int` that is `>= 1`. The number of buckets. + key: A list of `ints`. + The key used to seed the hash function, passed as a list of two uint64 + elements. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringToHashBucketStrong", name, input, "num_buckets", + num_buckets, "key", key) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_string_to_hash_bucket_strong( + (input, num_buckets, key, name,), None) + if _result is not NotImplemented: + return _result + return string_to_hash_bucket_strong_eager_fallback( + input, num_buckets=num_buckets, key=key, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_to_hash_bucket_strong, (), dict(input=input, + num_buckets=num_buckets, + key=key, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_string_to_hash_bucket_strong( + (input, num_buckets, key, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + num_buckets = _execute.make_int(num_buckets, "num_buckets") + if not isinstance(key, (list, tuple)): + raise TypeError( + "Expected list for 'key' argument to " + "'string_to_hash_bucket_strong' Op, not %r." 
% key) + key = [_execute.make_int(_i, "key") for _i in key] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringToHashBucketStrong", input=input, num_buckets=num_buckets, + key=key, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_to_hash_bucket_strong, (), dict(input=input, + num_buckets=num_buckets, + key=key, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_buckets", _op._get_attr_int("num_buckets"), "key", + _op.get_attr("key")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringToHashBucketStrong", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringToHashBucketStrong = tf_export("raw_ops.StringToHashBucketStrong")(_ops.to_raw_op(string_to_hash_bucket_strong)) +_dispatcher_for_string_to_hash_bucket_strong = string_to_hash_bucket_strong._tf_type_based_dispatcher.Dispatch + + +def string_to_hash_bucket_strong_eager_fallback(input: Annotated[Any, _atypes.String], num_buckets: int, key, name, ctx) -> Annotated[Any, _atypes.Int64]: + num_buckets = _execute.make_int(num_buckets, "num_buckets") + if not isinstance(key, (list, tuple)): + raise TypeError( + "Expected list for 'key' argument to " + "'string_to_hash_bucket_strong' Op, not %r." 
% key) + key = [_execute.make_int(_i, "key") for _i in key] + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("num_buckets", num_buckets, "key", key) + _result = _execute.execute(b"StringToHashBucketStrong", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringToHashBucketStrong", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.upper') +def string_upper(input: Annotated[Any, _atypes.String], encoding:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Converts all lowercase characters into their respective uppercase replacements. + + Example: + + >>> tf.strings.upper("CamelCase string and ALL CAPS") + + + Args: + input: A `Tensor` of type `string`. The input to be upper-cased. + encoding: An optional `string`. Defaults to `""`. + Character encoding of `input`. Allowed values are '' and 'utf-8'. + Value '' is interpreted as ASCII. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringUpper", name, input, "encoding", encoding) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_string_upper( + (input, encoding, name,), None) + if _result is not NotImplemented: + return _result + return string_upper_eager_fallback( + input, encoding=encoding, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_upper, (), dict(input=input, encoding=encoding, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_string_upper( + (input, encoding, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if encoding is None: + encoding = "" + encoding = _execute.make_str(encoding, "encoding") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringUpper", input=input, encoding=encoding, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + string_upper, (), dict(input=input, encoding=encoding, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("encoding", _op.get_attr("encoding")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringUpper", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringUpper = tf_export("raw_ops.StringUpper")(_ops.to_raw_op(string_upper)) +_dispatcher_for_string_upper = string_upper._tf_type_based_dispatcher.Dispatch + + +def string_upper_eager_fallback(input: Annotated[Any, _atypes.String], encoding: str, name, ctx) -> Annotated[Any, _atypes.String]: + if encoding is None: + encoding = "" + encoding = _execute.make_str(encoding, "encoding") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("encoding", encoding) + _result = _execute.execute(b"StringUpper", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringUpper", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Substr_T = TypeVar("TV_Substr_T", _atypes.Int32, _atypes.Int64) + +def substr(input: Annotated[Any, _atypes.String], pos: Annotated[Any, 
TV_Substr_T], len: Annotated[Any, TV_Substr_T], unit:str="BYTE", name=None) -> Annotated[Any, _atypes.String]: + r"""Return substrings from `Tensor` of strings. + + For each string in the input `Tensor`, creates a substring starting at index + `pos` with a total length of `len`. + + If `len` defines a substring that would extend beyond the length of the input + string, or if `len` is negative, then as many characters as possible are used. + + A negative `pos` indicates distance within the string backwards from the end. + + If `pos` specifies an index which is out of range for any of the input strings, + then an `InvalidArgumentError` is thrown. + + `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on + Op creation. + + *NOTE*: `Substr` supports broadcasting up to two dimensions. More about + broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + --- + + Examples + + Using scalar `pos` and `len`: + + ```python + input = [b'Hello', b'World'] + position = 1 + length = 3 + + output = [b'ell', b'orl'] + ``` + + Using `pos` and `len` with same shape as `input`: + + ```python + input = [[b'ten', b'eleven', b'twelve'], + [b'thirteen', b'fourteen', b'fifteen'], + [b'sixteen', b'seventeen', b'eighteen']] + position = [[1, 2, 3], + [1, 2, 3], + [1, 2, 3]] + length = [[2, 3, 4], + [4, 3, 2], + [5, 5, 5]] + + output = [[b'en', b'eve', b'lve'], + [b'hirt', b'urt', b'te'], + [b'ixtee', b'vente', b'hteen']] + ``` + + Broadcasting `pos` and `len` onto `input`: + + ``` + input = [[b'ten', b'eleven', b'twelve'], + [b'thirteen', b'fourteen', b'fifteen'], + [b'sixteen', b'seventeen', b'eighteen'], + [b'nineteen', b'twenty', b'twentyone']] + position = [1, 2, 3] + length = [1, 2, 3] + + output = [[b'e', b'ev', b'lve'], + [b'h', b'ur', b'tee'], + [b'i', b've', b'hte'], + [b'i', b'en', b'nty']] + ``` + + Broadcasting `input` onto `pos` and `len`: + + ``` + input = b'thirteen' + position = [1, 5, 7] + length = [3, 2, 1] + + 
output = [b'hir', b'ee', b'n'] + ``` + + Raises: + + * `ValueError`: If the first argument cannot be converted to a + Tensor of `dtype string`. + * `InvalidArgumentError`: If indices are out of range. + * `ValueError`: If `pos` and `len` are not the same shape. + + Args: + input: A `Tensor` of type `string`. Tensor of strings + pos: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Scalar defining the position of first character in each substring + len: A `Tensor`. Must have the same type as `pos`. + Scalar defining the number of characters to include in each substring + unit: An optional `string` from: `"BYTE", "UTF8_CHAR"`. Defaults to `"BYTE"`. + The unit that is used to create the substring. One of: `"BYTE"` (for + defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 + encoded Unicode code points). The default is `"BYTE"`. Results are undefined if + `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid + UTF-8. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Substr", name, input, pos, len, "unit", unit) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return substr_eager_fallback( + input, pos, len, unit=unit, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if unit is None: + unit = "BYTE" + unit = _execute.make_str(unit, "unit") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Substr", input=input, pos=pos, len=len, unit=unit, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "unit", _op.get_attr("unit")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Substr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Substr = tf_export("raw_ops.Substr")(_ops.to_raw_op(substr)) + + +def substr_eager_fallback(input: Annotated[Any, _atypes.String], pos: Annotated[Any, TV_Substr_T], len: Annotated[Any, TV_Substr_T], unit: str, name, ctx) -> Annotated[Any, _atypes.String]: + if unit is None: + unit = "BYTE" + unit = _execute.make_str(unit, "unit") + _attr_T, _inputs_T = _execute.args_to_matching_eager([pos, len], ctx, [_dtypes.int32, _dtypes.int64, ]) + (pos, len) = _inputs_T + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input, pos, len] + _attrs = ("T", _attr_T, "unit", unit) + _result = _execute.execute(b"Substr", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Substr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_UnicodeDecodeOutput = collections.namedtuple( + "UnicodeDecode", + ["row_splits", "char_values"]) + + +TV_UnicodeDecode_Tsplits = TypeVar("TV_UnicodeDecode_Tsplits", _atypes.Int32, _atypes.Int64) + +def unicode_decode(input: Annotated[Any, _atypes.String], input_encoding: str, errors:str="replace", replacement_char:int=65533, replace_control_characters:bool=False, Tsplits:TV_UnicodeDecode_Tsplits=_dtypes.int64, name=None): + r"""Decodes each string in `input` into a sequence of Unicode code points. + + The character codepoints for all strings are returned using a single vector + `char_values`, with strings expanded to characters in row-major order. 
+ + The `row_splits` tensor indicates where the codepoints for + each input string begin and end within the `char_values` tensor. + In particular, the values for the `i`th + string (in row-major order) are stored in the slice + `[row_splits[i]:row_splits[i+1]]`. Thus: + + * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th + character in the `i`th string (in row-major order). + * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th + string (in row-major order). + + Args: + input: A `Tensor` of type `string`. + The text to be decoded. Can have any shape. Note that the output is flattened + to a vector of char values. + input_encoding: A `string`. + Text encoding of the input strings. This is any of the encodings supported + by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + errors: An optional `string` from: `"strict", "replace", "ignore"`. Defaults to `"replace"`. + Error handling policy when there is invalid formatting found in the input. + The value of 'strict' will cause the operation to produce a InvalidArgument + error on any invalid input formatting. A value of 'replace' (the default) will + cause the operation to replace any invalid formatting in the input with the + `replacement_char` codepoint. A value of 'ignore' will cause the operation to + skip any invalid formatting in the input and produce no corresponding output + character. + replacement_char: An optional `int`. Defaults to `65533`. + The replacement character codepoint to be used in place of any invalid + formatting in the input when `errors='replace'`. Any valid unicode codepoint may + be used. The default value is the default unicode replacement character is + 0xFFFD or U+65533.) + replace_control_characters: An optional `bool`. Defaults to `False`. + Whether to replace the C0 control characters (00-1F) with the + `replacement_char`. Default is false. + Tsplits: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (row_splits, char_values). + + row_splits: A `Tensor` of type `Tsplits`. + char_values: A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnicodeDecode", name, input, "input_encoding", input_encoding, + "errors", errors, "replacement_char", replacement_char, + "replace_control_characters", replace_control_characters, "Tsplits", + Tsplits) + _result = _UnicodeDecodeOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unicode_decode_eager_fallback( + input, input_encoding=input_encoding, errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + Tsplits=Tsplits, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ input_encoding = _execute.make_str(input_encoding, "input_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnicodeDecode", input=input, input_encoding=input_encoding, + errors=errors, replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + Tsplits=Tsplits, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("input_encoding", _op.get_attr("input_encoding"), "errors", + _op.get_attr("errors"), "replacement_char", + _op._get_attr_int("replacement_char"), + "replace_control_characters", + _op._get_attr_bool("replace_control_characters"), "Tsplits", + _op._get_attr_type("Tsplits")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnicodeDecode", _inputs_flat, _attrs, _result) + _result = _UnicodeDecodeOutput._make(_result) + return _result + +UnicodeDecode = tf_export("raw_ops.UnicodeDecode")(_ops.to_raw_op(unicode_decode)) + + +def unicode_decode_eager_fallback(input: Annotated[Any, _atypes.String], input_encoding: str, errors: str, replacement_char: int, replace_control_characters: bool, Tsplits: TV_UnicodeDecode_Tsplits, name, ctx): + input_encoding = _execute.make_str(input_encoding, "input_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + 
replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("input_encoding", input_encoding, "errors", errors, + "replacement_char", replacement_char, "replace_control_characters", + replace_control_characters, "Tsplits", Tsplits) + _result = _execute.execute(b"UnicodeDecode", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnicodeDecode", _inputs_flat, _attrs, _result) + _result = _UnicodeDecodeOutput._make(_result) + return _result + +_UnicodeDecodeWithOffsetsOutput = collections.namedtuple( + "UnicodeDecodeWithOffsets", + ["row_splits", "char_values", "char_to_byte_starts"]) + + +TV_UnicodeDecodeWithOffsets_Tsplits = TypeVar("TV_UnicodeDecodeWithOffsets_Tsplits", _atypes.Int32, _atypes.Int64) + +def unicode_decode_with_offsets(input: Annotated[Any, _atypes.String], input_encoding: str, errors:str="replace", replacement_char:int=65533, replace_control_characters:bool=False, Tsplits:TV_UnicodeDecodeWithOffsets_Tsplits=_dtypes.int64, name=None): + r"""Decodes each string in `input` into a sequence of Unicode code points. + + The character codepoints for all strings are returned using a single vector + `char_values`, with strings expanded to characters in row-major order. + Similarly, the character start byte offsets are returned using a single vector + `char_to_byte_starts`, with strings expanded in row-major order. + + The `row_splits` tensor indicates where the codepoints and start offsets for + each input string begin and end within the `char_values` and + `char_to_byte_starts` tensors. 
In particular, the values for the `i`th + string (in row-major order) are stored in the slice + `[row_splits[i]:row_splits[i+1]]`. Thus: + + * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th + character in the `i`th string (in row-major order). + * `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th + character in the `i`th string (in row-major order). + * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th + string (in row-major order). + + Args: + input: A `Tensor` of type `string`. + The text to be decoded. Can have any shape. Note that the output is flattened + to a vector of char values. + input_encoding: A `string`. + Text encoding of the input strings. This is any of the encodings supported + by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + errors: An optional `string` from: `"strict", "replace", "ignore"`. Defaults to `"replace"`. + Error handling policy when there is invalid formatting found in the input. + The value of 'strict' will cause the operation to produce a InvalidArgument + error on any invalid input formatting. A value of 'replace' (the default) will + cause the operation to replace any invalid formatting in the input with the + `replacement_char` codepoint. A value of 'ignore' will cause the operation to + skip any invalid formatting in the input and produce no corresponding output + character. + replacement_char: An optional `int`. Defaults to `65533`. + The replacement character codepoint to be used in place of any invalid + formatting in the input when `errors='replace'`. Any valid unicode codepoint may + be used. The default value is the default unicode replacement character is + 0xFFFD or U+65533.) + replace_control_characters: An optional `bool`. Defaults to `False`. + Whether to replace the C0 control characters (00-1F) with the + `replacement_char`. Default is false. + Tsplits: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (row_splits, char_values, char_to_byte_starts). + + row_splits: A `Tensor` of type `Tsplits`. + char_values: A `Tensor` of type `int32`. + char_to_byte_starts: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnicodeDecodeWithOffsets", name, input, "input_encoding", + input_encoding, "errors", errors, "replacement_char", + replacement_char, "replace_control_characters", + replace_control_characters, "Tsplits", Tsplits) + _result = _UnicodeDecodeWithOffsetsOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unicode_decode_with_offsets_eager_fallback( + input, input_encoding=input_encoding, errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + Tsplits=Tsplits, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ input_encoding = _execute.make_str(input_encoding, "input_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnicodeDecodeWithOffsets", input=input, + input_encoding=input_encoding, + errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + Tsplits=Tsplits, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("input_encoding", _op.get_attr("input_encoding"), "errors", + _op.get_attr("errors"), "replacement_char", + _op._get_attr_int("replacement_char"), + "replace_control_characters", + _op._get_attr_bool("replace_control_characters"), "Tsplits", + _op._get_attr_type("Tsplits")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnicodeDecodeWithOffsets", _inputs_flat, _attrs, _result) + _result = _UnicodeDecodeWithOffsetsOutput._make(_result) + return _result + +UnicodeDecodeWithOffsets = tf_export("raw_ops.UnicodeDecodeWithOffsets")(_ops.to_raw_op(unicode_decode_with_offsets)) + + +def unicode_decode_with_offsets_eager_fallback(input: Annotated[Any, _atypes.String], input_encoding: str, errors: str, replacement_char: int, replace_control_characters: bool, Tsplits: TV_UnicodeDecodeWithOffsets_Tsplits, name, ctx): + input_encoding = _execute.make_str(input_encoding, "input_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = 
_execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("input_encoding", input_encoding, "errors", errors, + "replacement_char", replacement_char, "replace_control_characters", + replace_control_characters, "Tsplits", Tsplits) + _result = _execute.execute(b"UnicodeDecodeWithOffsets", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnicodeDecodeWithOffsets", _inputs_flat, _attrs, _result) + _result = _UnicodeDecodeWithOffsetsOutput._make(_result) + return _result + + +TV_UnicodeEncode_Tsplits = TypeVar("TV_UnicodeEncode_Tsplits", _atypes.Int32, _atypes.Int64) + +def unicode_encode(input_values: Annotated[Any, _atypes.Int32], input_splits: Annotated[Any, TV_UnicodeEncode_Tsplits], output_encoding: str, errors:str="replace", replacement_char:int=65533, name=None) -> Annotated[Any, _atypes.String]: + r"""Encode a tensor of ints into unicode strings. + + Returns a vector of strings, where `output[i]` is constructed by encoding the + Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]` + using `output_encoding`. + + --- + + Example: + + ``` + input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100] + input_splits = [0, 5, 10] + output_encoding = 'UTF-8' + + output = ['Hello', 'World'] + ``` + + Args: + input_values: A `Tensor` of type `int32`. + A 1D tensor containing the unicode codepoints that should be encoded. + input_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1D tensor specifying how the unicode codepoints should be split into strings. 
+ In particular, `output[i]` is constructed by encoding the codepoints in the + slice `input_values[input_splits[i]:input_splits[i+1]]`. + output_encoding: A `string` from: `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. + Unicode encoding of the output strings. Valid encodings are: `"UTF-8", + "UTF-16-BE", and "UTF-32-BE"`. + errors: An optional `string` from: `"ignore", "replace", "strict"`. Defaults to `"replace"`. + Error handling policy when there is invalid formatting found in the input. + The value of 'strict' will cause the operation to produce a InvalidArgument + error on any invalid input formatting. A value of 'replace' (the default) will + cause the operation to replace any invalid formatting in the input with the + `replacement_char` codepoint. A value of 'ignore' will cause the operation to + skip any invalid formatting in the input and produce no corresponding output + character. + replacement_char: An optional `int`. Defaults to `65533`. + The replacement character codepoint to be used in place of any invalid + formatting in the input when `errors='replace'`. Any valid unicode codepoint may + be used. The default value is the default unicode replacement character is + 0xFFFD (U+65533). + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnicodeEncode", name, input_values, input_splits, "errors", + errors, "output_encoding", output_encoding, "replacement_char", + replacement_char) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unicode_encode_eager_fallback( + input_values, input_splits, errors=errors, + output_encoding=output_encoding, replacement_char=replacement_char, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + output_encoding = _execute.make_str(output_encoding, "output_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnicodeEncode", input_values=input_values, input_splits=input_splits, + output_encoding=output_encoding, errors=errors, + replacement_char=replacement_char, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("errors", _op.get_attr("errors"), "output_encoding", + _op.get_attr("output_encoding"), "replacement_char", + _op._get_attr_int("replacement_char"), "Tsplits", + _op._get_attr_type("Tsplits")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnicodeEncode", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnicodeEncode = tf_export("raw_ops.UnicodeEncode")(_ops.to_raw_op(unicode_encode)) + + +def unicode_encode_eager_fallback(input_values: Annotated[Any, _atypes.Int32], input_splits: Annotated[Any, TV_UnicodeEncode_Tsplits], output_encoding: str, errors: str, replacement_char: int, name, ctx) -> 
Annotated[Any, _atypes.String]: + output_encoding = _execute.make_str(output_encoding, "output_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + _attr_Tsplits, (input_splits,) = _execute.args_to_matching_eager([input_splits], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + input_values = _ops.convert_to_tensor(input_values, _dtypes.int32) + _inputs_flat = [input_values, input_splits] + _attrs = ("errors", errors, "output_encoding", output_encoding, + "replacement_char", replacement_char, "Tsplits", _attr_Tsplits) + _result = _execute.execute(b"UnicodeEncode", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnicodeEncode", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.unicode_script') +def unicode_script(input: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.Int32]: + r"""Determine the script codes of a given tensor of Unicode integer code points. + + This operation converts Unicode code points to script codes corresponding to + each code point. Script codes correspond to International Components for + Unicode (ICU) UScriptCode values. + + See + [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) + for more details on script codes. + + For an example, see the unicode strings guide on [unicode scripts] + (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). + + Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will + match input shape. + + Examples: + + >>> tf.strings.unicode_script([1, 31, 38]) + + + Args: + input: A `Tensor` of type `int32`. A Tensor of int32 Unicode code points. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnicodeScript", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unicode_script( + (input, name,), None) + if _result is not NotImplemented: + return _result + return unicode_script_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unicode_script, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unicode_script( + (input, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnicodeScript", input=input, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unicode_script, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnicodeScript", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnicodeScript = tf_export("raw_ops.UnicodeScript")(_ops.to_raw_op(unicode_script)) +_dispatcher_for_unicode_script = unicode_script._tf_type_based_dispatcher.Dispatch + + +def unicode_script_eager_fallback(input: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.Int32]: + input = _ops.convert_to_tensor(input, _dtypes.int32) + _inputs_flat = [input] + _attrs = None + _result = _execute.execute(b"UnicodeScript", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnicodeScript", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('strings.unicode_transcode') +def unicode_transcode(input: Annotated[Any, _atypes.String], input_encoding: str, output_encoding: str, errors:str="replace", replacement_char:int=65533, replace_control_characters:bool=False, name=None) -> Annotated[Any, _atypes.String]: + r"""Transcode the input text from a source encoding to a destination encoding. + + The input is a string tensor of any shape. The output is a string tensor of + the same shape containing the transcoded strings. Output strings are always + valid unicode. If the input contains invalid encoding positions, the + `errors` attribute sets the policy for how to deal with them. 
If the default + error-handling policy is used, invalid formatting will be substituted in the + output by the `replacement_char`. If the errors policy is to `ignore`, any + invalid encoding positions in the input are skipped and not included in the + output. If it set to `strict` then any invalid formatting will result in an + InvalidArgument error. + + This operation can be used with `output_encoding = input_encoding` to enforce + correct formatting for inputs even if they are already in the desired encoding. + + If the input is prefixed by a Byte Order Mark needed to determine encoding + (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that + BOM will be consumed and not emitted into the output. If the input encoding + is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is + interpreted as a non-breaking-space and is preserved in the output (including + always for UTF-8). + + The end result is that if the input is marked as an explicit endianness the + transcoding is faithful to all codepoints in the source. If it is not marked + with an explicit endianness, the BOM is not considered part of the string itself + but as metadata, and so is not preserved in the output. + + Examples: + + >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") + + >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() + array([b'A', b'B', b'C'], dtype=object) + + Args: + input: A `Tensor` of type `string`. + The text to be processed. Can have any shape. + input_encoding: A `string`. + Text encoding of the input strings. This is any of the encodings supported + by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + output_encoding: A `string` from: `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. + The unicode encoding to use in the output. Must be one of + `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. 
+ errors: An optional `string` from: `"strict", "replace", "ignore"`. Defaults to `"replace"`. + Error handling policy when there is invalid formatting found in the input. + The value of 'strict' will cause the operation to produce a InvalidArgument + error on any invalid input formatting. A value of 'replace' (the default) will + cause the operation to replace any invalid formatting in the input with the + `replacement_char` codepoint. A value of 'ignore' will cause the operation to + skip any invalid formatting in the input and produce no corresponding output + character. + replacement_char: An optional `int`. Defaults to `65533`. + The replacement character codepoint to be used in place of any invalid + formatting in the input when `errors='replace'`. Any valid unicode codepoint may + be used. The default value is the default unicode replacement character is + 0xFFFD or U+65533.) + + Note that for UTF-8, passing a replacement character expressible in 1 byte, such + as ' ', will preserve string alignment to the source since invalid bytes will be + replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte + replacement character will preserve byte alignment to the source. + replace_control_characters: An optional `bool`. Defaults to `False`. + Whether to replace the C0 control characters (00-1F) with the + `replacement_char`. Default is false. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnicodeTranscode", name, input, "input_encoding", + input_encoding, "output_encoding", output_encoding, "errors", errors, + "replacement_char", replacement_char, "replace_control_characters", + replace_control_characters) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unicode_transcode( + (input, input_encoding, output_encoding, errors, replacement_char, + replace_control_characters, name,), None) + if _result is not NotImplemented: + return _result + return unicode_transcode_eager_fallback( + input, input_encoding=input_encoding, + output_encoding=output_encoding, errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unicode_transcode, (), dict(input=input, + input_encoding=input_encoding, + output_encoding=output_encoding, + errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unicode_transcode( + (input, input_encoding, output_encoding, errors, replacement_char, + replace_control_characters, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ input_encoding = _execute.make_str(input_encoding, "input_encoding") + output_encoding = _execute.make_str(output_encoding, "output_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnicodeTranscode", input=input, input_encoding=input_encoding, + output_encoding=output_encoding, errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unicode_transcode, (), dict(input=input, + input_encoding=input_encoding, + output_encoding=output_encoding, + errors=errors, + replacement_char=replacement_char, + replace_control_characters=replace_control_characters, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("input_encoding", _op.get_attr("input_encoding"), + "output_encoding", _op.get_attr("output_encoding"), "errors", + _op.get_attr("errors"), "replacement_char", + _op._get_attr_int("replacement_char"), + "replace_control_characters", + _op._get_attr_bool("replace_control_characters")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnicodeTranscode", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnicodeTranscode = tf_export("raw_ops.UnicodeTranscode")(_ops.to_raw_op(unicode_transcode)) +_dispatcher_for_unicode_transcode = unicode_transcode._tf_type_based_dispatcher.Dispatch + + +def unicode_transcode_eager_fallback(input: Annotated[Any, 
_atypes.String], input_encoding: str, output_encoding: str, errors: str, replacement_char: int, replace_control_characters: bool, name, ctx) -> Annotated[Any, _atypes.String]: + input_encoding = _execute.make_str(input_encoding, "input_encoding") + output_encoding = _execute.make_str(output_encoding, "output_encoding") + if errors is None: + errors = "replace" + errors = _execute.make_str(errors, "errors") + if replacement_char is None: + replacement_char = 65533 + replacement_char = _execute.make_int(replacement_char, "replacement_char") + if replace_control_characters is None: + replace_control_characters = False + replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters") + input = _ops.convert_to_tensor(input, _dtypes.string) + _inputs_flat = [input] + _attrs = ("input_encoding", input_encoding, "output_encoding", + output_encoding, "errors", errors, "replacement_char", replacement_char, + "replace_control_characters", replace_control_characters) + _result = _execute.execute(b"UnicodeTranscode", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnicodeTranscode", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnsortedSegmentJoin_Tindices = TypeVar("TV_UnsortedSegmentJoin_Tindices", _atypes.Int32, _atypes.Int64) +TV_UnsortedSegmentJoin_Tnumsegments = TypeVar("TV_UnsortedSegmentJoin_Tnumsegments", _atypes.Int32, _atypes.Int64) + +def unsorted_segment_join(inputs: Annotated[Any, _atypes.String], segment_ids: Annotated[Any, TV_UnsortedSegmentJoin_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentJoin_Tnumsegments], separator:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""TODO: add doc. + + Args: + inputs: A `Tensor` of type `string`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + num_segments: A `Tensor`. 
Must be one of the following types: `int32`, `int64`. + separator: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnsortedSegmentJoin", name, inputs, segment_ids, num_segments, + "separator", separator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unsorted_segment_join_eager_fallback( + inputs, segment_ids, num_segments, separator=separator, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnsortedSegmentJoin", inputs=inputs, segment_ids=segment_ids, + num_segments=num_segments, separator=separator, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("separator", _op.get_attr("separator"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnsortedSegmentJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnsortedSegmentJoin = tf_export("raw_ops.UnsortedSegmentJoin")(_ops.to_raw_op(unsorted_segment_join)) + + +def unsorted_segment_join_eager_fallback(inputs: Annotated[Any, _atypes.String], segment_ids: Annotated[Any, TV_UnsortedSegmentJoin_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentJoin_Tnumsegments], separator: str, name, ctx) -> Annotated[Any, _atypes.String]: + if separator is None: + separator = "" + separator = _execute.make_str(separator, "separator") + _attr_Tindices, 
(segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + inputs = _ops.convert_to_tensor(inputs, _dtypes.string) + _inputs_flat = [inputs, segment_ids, num_segments] + _attrs = ("separator", separator, "Tindices", _attr_Tindices, + "Tnumsegments", _attr_Tnumsegments) + _result = _execute.execute(b"UnsortedSegmentJoin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnsortedSegmentJoin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_training_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_training_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..6709891b53b859b801ca0b3960dee8d2b4b7cf24 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_training_ops.py @@ -0,0 +1,4350 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_ApplyAdaMax_T = TypeVar("TV_ApplyAdaMax_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_ada_max(var: Annotated[Any, TV_ApplyAdaMax_T], m: Annotated[Any, TV_ApplyAdaMax_T], v: Annotated[Any, TV_ApplyAdaMax_T], beta1_power: Annotated[Any, TV_ApplyAdaMax_T], lr: Annotated[Any, TV_ApplyAdaMax_T], beta1: Annotated[Any, TV_ApplyAdaMax_T], beta2: Annotated[Any, TV_ApplyAdaMax_T], epsilon: Annotated[Any, TV_ApplyAdaMax_T], grad: Annotated[Any, TV_ApplyAdaMax_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyAdaMax_T]: + r"""Update '*var' according to the AdaMax algorithm. + + m_t <- beta1 * m_{t-1} + (1 - beta1) * g + v_t <- max(beta2 * v_{t-1}, abs(g)) + variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + + Args: + var: A mutable `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + m: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + v: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + beta1_power: A `Tensor`. Must have the same type as `var`. + Must be a scalar. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + beta1: A `Tensor`. Must have the same type as `var`. + Momentum factor. Must be a scalar. + beta2: A `Tensor`. Must have the same type as `var`. + Momentum factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, m, and v tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_ada_max op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power, lr=lr, + beta1=beta1, beta2=beta2, epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdaMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdaMax = tf_export("raw_ops.ApplyAdaMax")(_ops.to_raw_op(apply_ada_max)) + + +def apply_ada_max_eager_fallback(var: Annotated[Any, TV_ApplyAdaMax_T], m: Annotated[Any, TV_ApplyAdaMax_T], v: Annotated[Any, TV_ApplyAdaMax_T], beta1_power: Annotated[Any, TV_ApplyAdaMax_T], lr: Annotated[Any, TV_ApplyAdaMax_T], beta1: Annotated[Any, TV_ApplyAdaMax_T], beta2: Annotated[Any, TV_ApplyAdaMax_T], epsilon: Annotated[Any, TV_ApplyAdaMax_T], grad: Annotated[Any, TV_ApplyAdaMax_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyAdaMax_T]: + raise RuntimeError("apply_ada_max op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAdadelta_T = TypeVar("TV_ApplyAdadelta_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_adadelta(var: Annotated[Any, TV_ApplyAdadelta_T], accum: Annotated[Any, TV_ApplyAdadelta_T], accum_update: Annotated[Any, TV_ApplyAdadelta_T], lr: Annotated[Any, TV_ApplyAdadelta_T], rho: Annotated[Any, TV_ApplyAdadelta_T], epsilon: Annotated[Any, TV_ApplyAdadelta_T], grad: Annotated[Any, TV_ApplyAdadelta_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyAdadelta_T]: + r"""Update '*var' according to the adadelta scheme. + + accum = rho() * accum + (1 - rho()) * grad.square(); + update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + update_accum = rho() * update_accum + (1 - rho()) * update.square(); + var -= update; + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + accum_update: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. 
+ If True, updating of the var, accum and update_accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_adadelta op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdadelta", var=var, accum=accum, accum_update=accum_update, + lr=lr, rho=rho, epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdadelta", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdadelta = tf_export("raw_ops.ApplyAdadelta")(_ops.to_raw_op(apply_adadelta)) + + +def apply_adadelta_eager_fallback(var: Annotated[Any, TV_ApplyAdadelta_T], accum: Annotated[Any, TV_ApplyAdadelta_T], accum_update: Annotated[Any, TV_ApplyAdadelta_T], lr: Annotated[Any, TV_ApplyAdadelta_T], rho: Annotated[Any, TV_ApplyAdadelta_T], epsilon: Annotated[Any, TV_ApplyAdadelta_T], grad: Annotated[Any, TV_ApplyAdadelta_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyAdadelta_T]: + raise RuntimeError("apply_adadelta op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAdagrad_T = TypeVar("TV_ApplyAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_adagrad(var: Annotated[Any, TV_ApplyAdagrad_T], accum: Annotated[Any, TV_ApplyAdagrad_T], lr: Annotated[Any, TV_ApplyAdagrad_T], grad: Annotated[Any, TV_ApplyAdagrad_T], use_locking:bool=False, update_slots:bool=True, name=None) -> Annotated[Any, TV_ApplyAdagrad_T]: + r"""Update '*var' according to the adagrad scheme. + + accum += grad * grad + var -= lr * grad * (1 / sqrt(accum)) + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_adagrad op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, + use_locking=use_locking, update_slots=update_slots, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "update_slots", + _op._get_attr_bool("update_slots")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdagrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdagrad = tf_export("raw_ops.ApplyAdagrad")(_ops.to_raw_op(apply_adagrad)) + + +def apply_adagrad_eager_fallback(var: Annotated[Any, TV_ApplyAdagrad_T], accum: Annotated[Any, TV_ApplyAdagrad_T], lr: Annotated[Any, TV_ApplyAdagrad_T], grad: Annotated[Any, TV_ApplyAdagrad_T], use_locking: bool, update_slots: bool, name, ctx) -> Annotated[Any, TV_ApplyAdagrad_T]: + raise RuntimeError("apply_adagrad op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAdagradDA_T = TypeVar("TV_ApplyAdagradDA_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_adagrad_da(var: Annotated[Any, TV_ApplyAdagradDA_T], gradient_accumulator: Annotated[Any, TV_ApplyAdagradDA_T], gradient_squared_accumulator: Annotated[Any, TV_ApplyAdagradDA_T], grad: Annotated[Any, TV_ApplyAdagradDA_T], lr: Annotated[Any, TV_ApplyAdagradDA_T], l1: Annotated[Any, TV_ApplyAdagradDA_T], l2: Annotated[Any, TV_ApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyAdagradDA_T]: + r"""Update '*var' according to the proximal adagrad scheme. + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. Must have the same type as `var`. The gradient. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + global_step: A `Tensor` of type `int64`. + Training step number. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. 
+ If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdagradDA", var=var, gradient_accumulator=gradient_accumulator, + gradient_squared_accumulator=gradient_squared_accumulator, + grad=grad, lr=lr, l1=l1, l2=l2, + global_step=global_step, use_locking=use_locking, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdagradDA", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdagradDA = tf_export("raw_ops.ApplyAdagradDA")(_ops.to_raw_op(apply_adagrad_da)) + + +def apply_adagrad_da_eager_fallback(var: Annotated[Any, TV_ApplyAdagradDA_T], gradient_accumulator: Annotated[Any, TV_ApplyAdagradDA_T], gradient_squared_accumulator: Annotated[Any, TV_ApplyAdagradDA_T], grad: Annotated[Any, TV_ApplyAdagradDA_T], lr: Annotated[Any, TV_ApplyAdagradDA_T], l1: Annotated[Any, TV_ApplyAdagradDA_T], l2: Annotated[Any, TV_ApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyAdagradDA_T]: + raise RuntimeError("apply_adagrad_da op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAdagradV2_T = TypeVar("TV_ApplyAdagradV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_adagrad_v2(var: Annotated[Any, TV_ApplyAdagradV2_T], accum: Annotated[Any, TV_ApplyAdagradV2_T], lr: Annotated[Any, TV_ApplyAdagradV2_T], epsilon: Annotated[Any, TV_ApplyAdagradV2_T], grad: Annotated[Any, TV_ApplyAdagradV2_T], use_locking:bool=False, update_slots:bool=True, name=None) -> Annotated[Any, TV_ApplyAdagradV2_T]: + r"""Update '*var' according to the adagrad scheme. + + accum += grad * grad + var -= lr * grad * (1 / sqrt(accum)) + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdagradV2", var=var, accum=accum, lr=lr, epsilon=epsilon, + grad=grad, use_locking=use_locking, + update_slots=update_slots, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "update_slots", + _op._get_attr_bool("update_slots")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdagradV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdagradV2 = tf_export("raw_ops.ApplyAdagradV2")(_ops.to_raw_op(apply_adagrad_v2)) + + +def apply_adagrad_v2_eager_fallback(var: Annotated[Any, TV_ApplyAdagradV2_T], accum: Annotated[Any, TV_ApplyAdagradV2_T], lr: Annotated[Any, TV_ApplyAdagradV2_T], epsilon: Annotated[Any, TV_ApplyAdagradV2_T], grad: Annotated[Any, TV_ApplyAdagradV2_T], use_locking: bool, update_slots: bool, name, ctx) -> Annotated[Any, TV_ApplyAdagradV2_T]: + raise RuntimeError("apply_adagrad_v2 op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAdam_T = TypeVar("TV_ApplyAdam_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_adam(var: Annotated[Any, TV_ApplyAdam_T], m: Annotated[Any, TV_ApplyAdam_T], v: Annotated[Any, TV_ApplyAdam_T], beta1_power: Annotated[Any, TV_ApplyAdam_T], beta2_power: Annotated[Any, TV_ApplyAdam_T], lr: Annotated[Any, TV_ApplyAdam_T], beta1: Annotated[Any, TV_ApplyAdam_T], beta2: Annotated[Any, TV_ApplyAdam_T], epsilon: Annotated[Any, TV_ApplyAdam_T], grad: Annotated[Any, TV_ApplyAdam_T], use_locking:bool=False, use_nesterov:bool=False, name=None) -> Annotated[Any, TV_ApplyAdam_T]: + r"""Update '*var' according to the Adam algorithm. + + $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + m: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + v: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + beta1_power: A `Tensor`. Must have the same type as `var`. + Must be a scalar. + beta2_power: A `Tensor`. Must have the same type as `var`. 
+ Must be a scalar. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + beta1: A `Tensor`. Must have the same type as `var`. + Momentum factor. Must be a scalar. + beta2: A `Tensor`. Must have the same type as `var`. + Momentum factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, m, and v tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, uses the nesterov update. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_adam op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power, + beta2_power=beta2_power, lr=lr, beta1=beta1, beta2=beta2, + epsilon=epsilon, grad=grad, use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "use_nesterov", + _op._get_attr_bool("use_nesterov")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAdam", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAdam = tf_export("raw_ops.ApplyAdam")(_ops.to_raw_op(apply_adam)) + + +def apply_adam_eager_fallback(var: Annotated[Any, TV_ApplyAdam_T], m: Annotated[Any, TV_ApplyAdam_T], v: Annotated[Any, TV_ApplyAdam_T], beta1_power: Annotated[Any, TV_ApplyAdam_T], beta2_power: Annotated[Any, TV_ApplyAdam_T], lr: Annotated[Any, TV_ApplyAdam_T], beta1: Annotated[Any, TV_ApplyAdam_T], beta2: Annotated[Any, TV_ApplyAdam_T], epsilon: Annotated[Any, TV_ApplyAdam_T], grad: Annotated[Any, TV_ApplyAdam_T], use_locking: bool, use_nesterov: bool, name, ctx) -> Annotated[Any, TV_ApplyAdam_T]: + raise RuntimeError("apply_adam op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyAddSign_T = TypeVar("TV_ApplyAddSign_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_add_sign(var: Annotated[Any, TV_ApplyAddSign_T], m: Annotated[Any, TV_ApplyAddSign_T], lr: Annotated[Any, TV_ApplyAddSign_T], alpha: Annotated[Any, TV_ApplyAddSign_T], sign_decay: Annotated[Any, TV_ApplyAddSign_T], beta: Annotated[Any, TV_ApplyAddSign_T], grad: Annotated[Any, TV_ApplyAddSign_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyAddSign_T]: + r"""Update '*var' according to the AddSign update. + + m_t <- beta1 * m_{t-1} + (1 - beta1) * g + update <- (alpha + sign_decay * sign(g) *sign(m)) * g + variable <- variable - lr_t * update + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + m: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + alpha: A `Tensor`. Must have the same type as `var`. Must be a scalar. + sign_decay: A `Tensor`. Must have the same type as `var`. + Must be a scalar. + beta: A `Tensor`. Must have the same type as `var`. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and m tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). 
+ + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_add_sign op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyAddSign", var=var, m=m, lr=lr, alpha=alpha, + sign_decay=sign_decay, beta=beta, grad=grad, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyAddSign", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyAddSign = tf_export("raw_ops.ApplyAddSign")(_ops.to_raw_op(apply_add_sign)) + + +def apply_add_sign_eager_fallback(var: Annotated[Any, TV_ApplyAddSign_T], m: Annotated[Any, TV_ApplyAddSign_T], lr: Annotated[Any, TV_ApplyAddSign_T], alpha: Annotated[Any, TV_ApplyAddSign_T], sign_decay: Annotated[Any, TV_ApplyAddSign_T], beta: Annotated[Any, TV_ApplyAddSign_T], grad: Annotated[Any, TV_ApplyAddSign_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyAddSign_T]: + raise RuntimeError("apply_add_sign op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyCenteredRMSProp_T = TypeVar("TV_ApplyCenteredRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_centered_rms_prop(var: Annotated[Any, TV_ApplyCenteredRMSProp_T], mg: Annotated[Any, TV_ApplyCenteredRMSProp_T], ms: Annotated[Any, TV_ApplyCenteredRMSProp_T], mom: Annotated[Any, TV_ApplyCenteredRMSProp_T], lr: Annotated[Any, TV_ApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ApplyCenteredRMSProp_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyCenteredRMSProp_T]: + r"""Update '*var' according to the centered RMSProp algorithm. + + The centered RMSProp algorithm uses an estimate of the centered second moment + (i.e., the variance) for normalization, as opposed to regular RMSProp, which + uses the (uncentered) second moment. This often helps with training, but is + slightly more expensive in terms of computation and memory. + + Note that in dense implementation of this algorithm, mg, ms, and mom will + update even if the grad is zero, but in this sparse implementation, mg, ms, + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + mean_grad = decay * mean_grad + (1-decay) * gradient + + Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + + mg <- rho * mg_{t-1} + (1-rho) * grad + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + var <- var - mom + + Args: + var: A mutable `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + mg: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + ms: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + mom: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `var`. + Momentum Scale. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, mg, ms, and mom tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr, + rho=rho, momentum=momentum, epsilon=epsilon, + grad=grad, use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyCenteredRMSProp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyCenteredRMSProp = tf_export("raw_ops.ApplyCenteredRMSProp")(_ops.to_raw_op(apply_centered_rms_prop)) + + +def apply_centered_rms_prop_eager_fallback(var: Annotated[Any, TV_ApplyCenteredRMSProp_T], mg: Annotated[Any, TV_ApplyCenteredRMSProp_T], ms: Annotated[Any, TV_ApplyCenteredRMSProp_T], mom: Annotated[Any, TV_ApplyCenteredRMSProp_T], lr: Annotated[Any, TV_ApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ApplyCenteredRMSProp_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyCenteredRMSProp_T]: + raise RuntimeError("apply_centered_rms_prop op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyFtrl_T = TypeVar("TV_ApplyFtrl_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_ftrl(var: Annotated[Any, TV_ApplyFtrl_T], accum: Annotated[Any, TV_ApplyFtrl_T], linear: Annotated[Any, TV_ApplyFtrl_T], grad: Annotated[Any, TV_ApplyFtrl_T], lr: Annotated[Any, TV_ApplyFtrl_T], l1: Annotated[Any, TV_ApplyFtrl_T], l2: Annotated[Any, TV_ApplyFtrl_T], lr_power: Annotated[Any, TV_ApplyFtrl_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None) -> Annotated[Any, TV_ApplyFtrl_T]: + r"""Update '*var' according to the Ftrl-proximal scheme. + + accum_new = accum + grad * grad + linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + linear: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. Must have the same type as `var`. The gradient. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + lr_power: A `Tensor`. 
Must have the same type as `var`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_ftrl op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, lr=lr, + l1=l1, l2=l2, lr_power=lr_power, use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", + _op._get_attr_bool("multiply_linear_by_lr")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyFtrl", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyFtrl = tf_export("raw_ops.ApplyFtrl")(_ops.to_raw_op(apply_ftrl)) + + +def apply_ftrl_eager_fallback(var: Annotated[Any, TV_ApplyFtrl_T], accum: Annotated[Any, TV_ApplyFtrl_T], linear: Annotated[Any, TV_ApplyFtrl_T], grad: Annotated[Any, TV_ApplyFtrl_T], lr: Annotated[Any, TV_ApplyFtrl_T], l1: Annotated[Any, TV_ApplyFtrl_T], l2: Annotated[Any, TV_ApplyFtrl_T], lr_power: Annotated[Any, TV_ApplyFtrl_T], 
use_locking: bool, multiply_linear_by_lr: bool, name, ctx) -> Annotated[Any, TV_ApplyFtrl_T]: + raise RuntimeError("apply_ftrl op does not support eager execution. Arg 'out' is a ref.") + +TV_ApplyFtrlV2_T = TypeVar("TV_ApplyFtrlV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_ftrl_v2(var: Annotated[Any, TV_ApplyFtrlV2_T], accum: Annotated[Any, TV_ApplyFtrlV2_T], linear: Annotated[Any, TV_ApplyFtrlV2_T], grad: Annotated[Any, TV_ApplyFtrlV2_T], lr: Annotated[Any, TV_ApplyFtrlV2_T], l1: Annotated[Any, TV_ApplyFtrlV2_T], l2: Annotated[Any, TV_ApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ApplyFtrlV2_T], lr_power: Annotated[Any, TV_ApplyFtrlV2_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None) -> Annotated[Any, TV_ApplyFtrlV2_T]: + r"""Update '*var' according to the Ftrl-proximal scheme. + + grad_with_shrinkage = grad + 2 * l2_shrinkage * var + accum_new = accum + grad * grad + linear += grad_with_shrinkage - + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + linear: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. Must have the same type as `var`. The gradient. 
+ lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 shrinkage regularization. Must be a scalar. + l2_shrinkage: A `Tensor`. Must have the same type as `var`. + lr_power: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, lr=lr, + l1=l1, l2=l2, l2_shrinkage=l2_shrinkage, + lr_power=lr_power, use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", + _op._get_attr_bool("multiply_linear_by_lr")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyFtrlV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyFtrlV2 = tf_export("raw_ops.ApplyFtrlV2")(_ops.to_raw_op(apply_ftrl_v2)) + + +def apply_ftrl_v2_eager_fallback(var: Annotated[Any, TV_ApplyFtrlV2_T], accum: Annotated[Any, TV_ApplyFtrlV2_T], linear: Annotated[Any, TV_ApplyFtrlV2_T], grad: Annotated[Any, TV_ApplyFtrlV2_T], lr: Annotated[Any, TV_ApplyFtrlV2_T], l1: Annotated[Any, TV_ApplyFtrlV2_T], l2: Annotated[Any, TV_ApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ApplyFtrlV2_T], lr_power: Annotated[Any, TV_ApplyFtrlV2_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx) -> Annotated[Any, TV_ApplyFtrlV2_T]: + raise RuntimeError("apply_ftrl_v2 op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyGradientDescent_T = TypeVar("TV_ApplyGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_gradient_descent(var: Annotated[Any, TV_ApplyGradientDescent_T], alpha: Annotated[Any, TV_ApplyGradientDescent_T], delta: Annotated[Any, TV_ApplyGradientDescent_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyGradientDescent_T]: + r"""Update '*var' by subtracting 'alpha' * 'delta' from it. + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + alpha: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + delta: A `Tensor`. Must have the same type as `var`. The change. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyGradientDescent", var=var, alpha=alpha, delta=delta, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyGradientDescent", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyGradientDescent = tf_export("raw_ops.ApplyGradientDescent")(_ops.to_raw_op(apply_gradient_descent)) + + +def apply_gradient_descent_eager_fallback(var: Annotated[Any, TV_ApplyGradientDescent_T], alpha: Annotated[Any, TV_ApplyGradientDescent_T], delta: Annotated[Any, TV_ApplyGradientDescent_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyGradientDescent_T]: + raise RuntimeError("apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.") + +TV_ApplyMomentum_T = TypeVar("TV_ApplyMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_momentum(var: Annotated[Any, TV_ApplyMomentum_T], accum: Annotated[Any, TV_ApplyMomentum_T], lr: Annotated[Any, TV_ApplyMomentum_T], grad: Annotated[Any, TV_ApplyMomentum_T], momentum: Annotated[Any, TV_ApplyMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None) -> Annotated[Any, TV_ApplyMomentum_T]: + r"""Update '*var' according to the momentum scheme. + + Set use_nesterov = True if you want to use Nesterov momentum. + + accum = accum * momentum + grad + var -= lr * accum + + Args: + var: A mutable `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + momentum: A `Tensor`. Must have the same type as `var`. + Momentum. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, the tensor passed to compute grad will be + var - lr * momentum * accum, so in the end, the var you get is actually + var - lr * momentum * accum. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_momentum op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, + momentum=momentum, use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking"), "use_nesterov", + _op._get_attr_bool("use_nesterov")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyMomentum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyMomentum = tf_export("raw_ops.ApplyMomentum")(_ops.to_raw_op(apply_momentum)) + + +def apply_momentum_eager_fallback(var: Annotated[Any, TV_ApplyMomentum_T], accum: Annotated[Any, TV_ApplyMomentum_T], lr: Annotated[Any, TV_ApplyMomentum_T], grad: Annotated[Any, TV_ApplyMomentum_T], momentum: Annotated[Any, TV_ApplyMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx) -> Annotated[Any, TV_ApplyMomentum_T]: + raise RuntimeError("apply_momentum op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyPowerSign_T = TypeVar("TV_ApplyPowerSign_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_power_sign(var: Annotated[Any, TV_ApplyPowerSign_T], m: Annotated[Any, TV_ApplyPowerSign_T], lr: Annotated[Any, TV_ApplyPowerSign_T], logbase: Annotated[Any, TV_ApplyPowerSign_T], sign_decay: Annotated[Any, TV_ApplyPowerSign_T], beta: Annotated[Any, TV_ApplyPowerSign_T], grad: Annotated[Any, TV_ApplyPowerSign_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyPowerSign_T]: + r"""Update '*var' according to the AddSign update. + + m_t <- beta1 * m_{t-1} + (1 - beta1) * g + update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + variable <- variable - lr_t * update + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + m: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + logbase: A `Tensor`. Must have the same type as `var`. Must be a scalar. + sign_decay: A `Tensor`. Must have the same type as `var`. + Must be a scalar. + beta: A `Tensor`. Must have the same type as `var`. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and m tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. 
+ name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_power_sign op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase, + sign_decay=sign_decay, beta=beta, grad=grad, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyPowerSign", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyPowerSign = tf_export("raw_ops.ApplyPowerSign")(_ops.to_raw_op(apply_power_sign)) + + +def apply_power_sign_eager_fallback(var: Annotated[Any, TV_ApplyPowerSign_T], m: Annotated[Any, TV_ApplyPowerSign_T], lr: Annotated[Any, TV_ApplyPowerSign_T], logbase: Annotated[Any, TV_ApplyPowerSign_T], sign_decay: Annotated[Any, TV_ApplyPowerSign_T], beta: Annotated[Any, TV_ApplyPowerSign_T], grad: Annotated[Any, TV_ApplyPowerSign_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyPowerSign_T]: + raise RuntimeError("apply_power_sign op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyProximalAdagrad_T = TypeVar("TV_ApplyProximalAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_proximal_adagrad(var: Annotated[Any, TV_ApplyProximalAdagrad_T], accum: Annotated[Any, TV_ApplyProximalAdagrad_T], lr: Annotated[Any, TV_ApplyProximalAdagrad_T], l1: Annotated[Any, TV_ApplyProximalAdagrad_T], l2: Annotated[Any, TV_ApplyProximalAdagrad_T], grad: Annotated[Any, TV_ApplyProximalAdagrad_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyProximalAdagrad_T]: + r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + + accum += grad * grad + prox_v = var - lr * grad * (1 / sqrt(accum)) + var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). 
+ + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, l2=l2, + grad=grad, use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyProximalAdagrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyProximalAdagrad = tf_export("raw_ops.ApplyProximalAdagrad")(_ops.to_raw_op(apply_proximal_adagrad)) + + +def apply_proximal_adagrad_eager_fallback(var: Annotated[Any, TV_ApplyProximalAdagrad_T], accum: Annotated[Any, TV_ApplyProximalAdagrad_T], lr: Annotated[Any, TV_ApplyProximalAdagrad_T], l1: Annotated[Any, TV_ApplyProximalAdagrad_T], l2: Annotated[Any, TV_ApplyProximalAdagrad_T], grad: Annotated[Any, TV_ApplyProximalAdagrad_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyProximalAdagrad_T]: + raise RuntimeError("apply_proximal_adagrad op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyProximalGradientDescent_T = TypeVar("TV_ApplyProximalGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_proximal_gradient_descent(var: Annotated[Any, TV_ApplyProximalGradientDescent_T], alpha: Annotated[Any, TV_ApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ApplyProximalGradientDescent_T], delta: Annotated[Any, TV_ApplyProximalGradientDescent_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyProximalGradientDescent_T]: + r"""Update '*var' as FOBOS algorithm with fixed learning rate. + + prox_v = var - alpha * delta + var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + alpha: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + delta: A `Tensor`. Must have the same type as `var`. The change. + use_locking: An optional `bool`. Defaults to `False`. + If True, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, l2=l2, + delta=delta, use_locking=use_locking, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyProximalGradientDescent", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyProximalGradientDescent = tf_export("raw_ops.ApplyProximalGradientDescent")(_ops.to_raw_op(apply_proximal_gradient_descent)) + + +def apply_proximal_gradient_descent_eager_fallback(var: Annotated[Any, TV_ApplyProximalGradientDescent_T], alpha: Annotated[Any, TV_ApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ApplyProximalGradientDescent_T], delta: Annotated[Any, TV_ApplyProximalGradientDescent_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyProximalGradientDescent_T]: + raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ApplyRMSProp_T = TypeVar("TV_ApplyRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def apply_rms_prop(var: Annotated[Any, TV_ApplyRMSProp_T], ms: Annotated[Any, TV_ApplyRMSProp_T], mom: Annotated[Any, TV_ApplyRMSProp_T], lr: Annotated[Any, TV_ApplyRMSProp_T], rho: Annotated[Any, TV_ApplyRMSProp_T], momentum: Annotated[Any, TV_ApplyRMSProp_T], epsilon: Annotated[Any, TV_ApplyRMSProp_T], grad: Annotated[Any, TV_ApplyRMSProp_T], use_locking:bool=False, name=None) -> Annotated[Any, TV_ApplyRMSProp_T]: + r"""Update '*var' according to the RMSProp algorithm. + + Note that in dense implementation of this algorithm, ms and mom will + update even if the grad is zero, but in this sparse implementation, ms + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + var <- var - mom + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + ms: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + mom: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay rate. Must be a scalar. 
+ momentum: A `Tensor`. Must have the same type as `var`. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, ms, and mom tensors is protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, + momentum=momentum, epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApplyRMSProp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApplyRMSProp = tf_export("raw_ops.ApplyRMSProp")(_ops.to_raw_op(apply_rms_prop)) + + +def apply_rms_prop_eager_fallback(var: Annotated[Any, TV_ApplyRMSProp_T], ms: Annotated[Any, TV_ApplyRMSProp_T], mom: Annotated[Any, TV_ApplyRMSProp_T], lr: Annotated[Any, TV_ApplyRMSProp_T], rho: Annotated[Any, TV_ApplyRMSProp_T], momentum: Annotated[Any, TV_ApplyRMSProp_T], epsilon: Annotated[Any, TV_ApplyRMSProp_T], grad: Annotated[Any, TV_ApplyRMSProp_T], use_locking: bool, name, ctx) -> Annotated[Any, TV_ApplyRMSProp_T]: + raise RuntimeError("apply_rms_prop op does not support eager execution. 
Arg 'out' is a ref.") + +TV_ResourceApplyAdaMax_T = TypeVar("TV_ResourceApplyAdaMax_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_ada_max(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdaMax_T], lr: Annotated[Any, TV_ResourceApplyAdaMax_T], beta1: Annotated[Any, TV_ResourceApplyAdaMax_T], beta2: Annotated[Any, TV_ResourceApplyAdaMax_T], epsilon: Annotated[Any, TV_ResourceApplyAdaMax_T], grad: Annotated[Any, TV_ResourceApplyAdaMax_T], use_locking:bool=False, name=None): + r"""Update '*var' according to the AdaMax algorithm. + + m_t <- beta1 * m_{t-1} + (1 - beta1) * g + v_t <- max(beta2 * v_{t-1}, abs(g)) + variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + m: A `Tensor` of type `resource`. Should be from a Variable(). + v: A `Tensor` of type `resource`. Should be from a Variable(). + beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Must be a scalar. + lr: A `Tensor`. Must have the same type as `beta1_power`. + Scaling factor. Must be a scalar. + beta1: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + beta2: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `beta1_power`. + Ridge term. Must be a scalar. + grad: A `Tensor`. 
Must have the same type as `beta1_power`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, m, and v tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdaMax", name, var, m, v, beta1_power, lr, beta1, + beta2, epsilon, grad, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_ada_max_eager_fallback( + var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power, + lr=lr, beta1=beta1, beta2=beta2, + epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + return _op +ResourceApplyAdaMax = tf_export("raw_ops.ResourceApplyAdaMax")(_ops.to_raw_op(resource_apply_ada_max)) + + +def resource_apply_ada_max_eager_fallback(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdaMax_T], lr: Annotated[Any, TV_ResourceApplyAdaMax_T], beta1: Annotated[Any, TV_ResourceApplyAdaMax_T], beta2: Annotated[Any, TV_ResourceApplyAdaMax_T], epsilon: Annotated[Any, TV_ResourceApplyAdaMax_T], grad: Annotated[Any, TV_ResourceApplyAdaMax_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (beta1_power, lr, beta1, beta2, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + m = _ops.convert_to_tensor(m, _dtypes.resource) + v = _ops.convert_to_tensor(v, _dtypes.resource) + _inputs_flat = [var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyAdaMax", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_ResourceApplyAdadelta_T 
= TypeVar("TV_ResourceApplyAdadelta_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adadelta(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], accum_update: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdadelta_T], rho: Annotated[Any, TV_ResourceApplyAdadelta_T], epsilon: Annotated[Any, TV_ResourceApplyAdadelta_T], grad: Annotated[Any, TV_ResourceApplyAdadelta_T], use_locking:bool=False, name=None): + r"""Update '*var' according to the adadelta scheme. + + accum = rho() * accum + (1 - rho()) * grad.square(); + update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + update_accum = rho() * update_accum + (1 - rho()) * update.square(); + var -= update; + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + accum_update: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `lr`. + Decay factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `lr`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var, accum and update_accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. 
+ name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdadelta", name, var, accum, accum_update, lr, + rho, epsilon, grad, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adadelta_eager_fallback( + var, accum, accum_update, lr, rho, epsilon, grad, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdadelta", var=var, accum=accum, + accum_update=accum_update, lr=lr, rho=rho, + epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + return _op +ResourceApplyAdadelta = tf_export("raw_ops.ResourceApplyAdadelta")(_ops.to_raw_op(resource_apply_adadelta)) + + +def resource_apply_adadelta_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], accum_update: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdadelta_T], rho: Annotated[Any, TV_ResourceApplyAdadelta_T], epsilon: Annotated[Any, TV_ResourceApplyAdadelta_T], grad: Annotated[Any, TV_ResourceApplyAdadelta_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, 
_dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, rho, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource) + _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyAdadelta", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_ResourceApplyAdagrad_T = TypeVar("TV_ResourceApplyAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adagrad(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdagrad_T], grad: Annotated[Any, TV_ResourceApplyAdagrad_T], use_locking:bool=False, update_slots:bool=True, name=None): + r"""Update '*var' according to the adagrad scheme. + + accum += grad * grad + var -= lr * grad * (1 / sqrt(accum)) + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. 
+ If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdagrad", name, var, accum, lr, grad, + "use_locking", use_locking, "update_slots", update_slots) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adagrad_eager_fallback( + var, accum, lr, grad, use_locking=use_locking, + update_slots=update_slots, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, + use_locking=use_locking, + update_slots=update_slots, name=name) + return _op +ResourceApplyAdagrad = tf_export("raw_ops.ResourceApplyAdagrad")(_ops.to_raw_op(resource_apply_adagrad)) + + +def resource_apply_adagrad_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdagrad_T], grad: Annotated[Any, TV_ResourceApplyAdagrad_T], use_locking: bool, update_slots: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = 
_execute.make_bool(update_slots, "update_slots") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking, "update_slots", + update_slots) + _result = _execute.execute(b"ResourceApplyAdagrad", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_ResourceApplyAdagradDA_T = TypeVar("TV_ResourceApplyAdagradDA_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adagrad_da(var: Annotated[Any, _atypes.Resource], gradient_accumulator: Annotated[Any, _atypes.Resource], gradient_squared_accumulator: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyAdagradDA_T], lr: Annotated[Any, TV_ResourceApplyAdagradDA_T], l1: Annotated[Any, TV_ResourceApplyAdagradDA_T], l2: Annotated[Any, TV_ResourceApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking:bool=False, name=None): + r"""Update '*var' according to the proximal adagrad scheme. + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + gradient_accumulator: A `Tensor` of type `resource`. + Should be from a Variable(). + gradient_squared_accumulator: A `Tensor` of type `resource`. 
+ Should be from a Variable(). + grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The gradient. + lr: A `Tensor`. Must have the same type as `grad`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `grad`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `grad`. + L2 regularization. Must be a scalar. + global_step: A `Tensor` of type `int64`. + Training step number. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdagradDA", name, var, gradient_accumulator, + gradient_squared_accumulator, grad, lr, l1, l2, global_step, + "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adagrad_da_eager_fallback( + var, gradient_accumulator, gradient_squared_accumulator, grad, lr, + l1, l2, global_step, use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdagradDA", var=var, + gradient_accumulator=gradient_accumulator, + gradient_squared_accumulator=gradient_squared_accumulator, + grad=grad, lr=lr, l1=l1, l2=l2, + global_step=global_step, + use_locking=use_locking, name=name) + return _op +ResourceApplyAdagradDA = tf_export("raw_ops.ResourceApplyAdagradDA")(_ops.to_raw_op(resource_apply_adagrad_da)) + + +def resource_apply_adagrad_da_eager_fallback(var: Annotated[Any, _atypes.Resource], gradient_accumulator: Annotated[Any, _atypes.Resource], gradient_squared_accumulator: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyAdagradDA_T], lr: Annotated[Any, TV_ResourceApplyAdagradDA_T], l1: Annotated[Any, TV_ResourceApplyAdagradDA_T], l2: Annotated[Any, TV_ResourceApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (grad, lr, l1, l2) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource) + gradient_squared_accumulator = _ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource) + global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) + _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step] + _attrs = ("T", _attr_T, 
"use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyAdagradDA", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyAdagradV2_T = TypeVar("TV_ResourceApplyAdagradV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adagrad_v2(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdagradV2_T], epsilon: Annotated[Any, TV_ResourceApplyAdagradV2_T], grad: Annotated[Any, TV_ResourceApplyAdagradV2_T], use_locking:bool=False, update_slots:bool=True, name=None): + r"""Update '*var' according to the adagrad scheme. + + accum += grad * grad + var -= lr * grad * (1 / (sqrt(accum) + epsilon)) + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `lr`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdagradV2", name, var, accum, lr, epsilon, grad, + "use_locking", use_locking, "update_slots", update_slots) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adagrad_v2_eager_fallback( + var, accum, lr, epsilon, grad, use_locking=use_locking, + update_slots=update_slots, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdagradV2", var=var, accum=accum, lr=lr, + epsilon=epsilon, grad=grad, + use_locking=use_locking, + update_slots=update_slots, name=name) + return _op +ResourceApplyAdagradV2 = tf_export("raw_ops.ResourceApplyAdagradV2")(_ops.to_raw_op(resource_apply_adagrad_v2)) + + +def resource_apply_adagrad_v2_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAdagradV2_T], epsilon: Annotated[Any, TV_ResourceApplyAdagradV2_T], grad: Annotated[Any, TV_ResourceApplyAdagradV2_T], use_locking: bool, update_slots: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, 
_dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking, "update_slots", + update_slots) + _result = _execute.execute(b"ResourceApplyAdagradV2", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyAdam_T = TypeVar("TV_ResourceApplyAdam_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adam(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdam_T], beta2_power: Annotated[Any, TV_ResourceApplyAdam_T], lr: Annotated[Any, TV_ResourceApplyAdam_T], beta1: Annotated[Any, TV_ResourceApplyAdam_T], beta2: Annotated[Any, TV_ResourceApplyAdam_T], epsilon: Annotated[Any, TV_ResourceApplyAdam_T], grad: Annotated[Any, TV_ResourceApplyAdam_T], use_locking:bool=False, use_nesterov:bool=False, name=None): + r"""Update '*var' according to the Adam algorithm. 
+ + $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + m: A `Tensor` of type `resource`. Should be from a Variable(). + v: A `Tensor` of type `resource`. Should be from a Variable(). + beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Must be a scalar. + beta2_power: A `Tensor`. Must have the same type as `beta1_power`. + Must be a scalar. + lr: A `Tensor`. Must have the same type as `beta1_power`. + Scaling factor. Must be a scalar. + beta1: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + beta2: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `beta1_power`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, m, and v tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, uses the nesterov update. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdam", name, var, m, v, beta1_power, beta2_power, + lr, beta1, beta2, epsilon, grad, "use_locking", use_locking, + "use_nesterov", use_nesterov) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adam_eager_fallback( + var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, + grad, use_locking=use_locking, use_nesterov=use_nesterov, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power, + beta2_power=beta2_power, lr=lr, beta1=beta1, + beta2=beta2, epsilon=epsilon, grad=grad, + use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + return _op +ResourceApplyAdam = tf_export("raw_ops.ResourceApplyAdam")(_ops.to_raw_op(resource_apply_adam)) + + +def resource_apply_adam_eager_fallback(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdam_T], beta2_power: Annotated[Any, TV_ResourceApplyAdam_T], lr: Annotated[Any, TV_ResourceApplyAdam_T], beta1: Annotated[Any, TV_ResourceApplyAdam_T], beta2: Annotated[Any, TV_ResourceApplyAdam_T], epsilon: Annotated[Any, TV_ResourceApplyAdam_T], grad: Annotated[Any, TV_ResourceApplyAdam_T], use_locking: bool, use_nesterov: bool, name, ctx): + if use_locking is None: + use_locking = 
False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + m = _ops.convert_to_tensor(m, _dtypes.resource) + v = _ops.convert_to_tensor(v, _dtypes.resource) + _inputs_flat = [var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov", + use_nesterov) + _result = _execute.execute(b"ResourceApplyAdam", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_ResourceApplyAdamWithAmsgrad_T = TypeVar("TV_ResourceApplyAdamWithAmsgrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_adam_with_amsgrad(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], vhat: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta2_power: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], lr: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta1: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta2: 
Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], epsilon: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], grad: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], use_locking:bool=False, name=None): + r"""Update '*var' according to the Adam algorithm. + + $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ + $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + m: A `Tensor` of type `resource`. Should be from a Variable(). + v: A `Tensor` of type `resource`. Should be from a Variable(). + vhat: A `Tensor` of type `resource`. Should be from a Variable(). + beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Must be a scalar. + beta2_power: A `Tensor`. Must have the same type as `beta1_power`. + Must be a scalar. + lr: A `Tensor`. Must have the same type as `beta1_power`. + Scaling factor. Must be a scalar. + beta1: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + beta2: A `Tensor`. Must have the same type as `beta1_power`. + Momentum factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `beta1_power`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, m, and v tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAdamWithAmsgrad", name, var, m, v, vhat, + beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, + "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_adam_with_amsgrad_eager_fallback( + var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, + epsilon, grad, use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyAdamWithAmsgrad", var=var, m=m, v=v, vhat=vhat, + beta1_power=beta1_power, + beta2_power=beta2_power, lr=lr, + beta1=beta1, beta2=beta2, + epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + return _op +ResourceApplyAdamWithAmsgrad = tf_export("raw_ops.ResourceApplyAdamWithAmsgrad")(_ops.to_raw_op(resource_apply_adam_with_amsgrad)) + + +def resource_apply_adam_with_amsgrad_eager_fallback(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], v: Annotated[Any, _atypes.Resource], vhat: Annotated[Any, _atypes.Resource], beta1_power: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta2_power: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], lr: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta1: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], beta2: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], epsilon: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], grad: Annotated[Any, TV_ResourceApplyAdamWithAmsgrad_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = 
False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + m = _ops.convert_to_tensor(m, _dtypes.resource) + v = _ops.convert_to_tensor(v, _dtypes.resource) + vhat = _ops.convert_to_tensor(vhat, _dtypes.resource) + _inputs_flat = [var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyAdamWithAmsgrad", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyAddSign_T = TypeVar("TV_ResourceApplyAddSign_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_add_sign(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAddSign_T], alpha: Annotated[Any, TV_ResourceApplyAddSign_T], sign_decay: Annotated[Any, TV_ResourceApplyAddSign_T], beta: Annotated[Any, TV_ResourceApplyAddSign_T], grad: Annotated[Any, TV_ResourceApplyAddSign_T], use_locking:bool=False, name=None): + r"""Update '*var' according to the AddSign update. 
+ + m_t <- beta1 * m_{t-1} + (1 - beta1) * g + update <- (alpha + sign_decay * sign(g) *sign(m)) * g + variable <- variable - lr_t * update + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + m: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + alpha: A `Tensor`. Must have the same type as `lr`. Must be a scalar. + sign_decay: A `Tensor`. Must have the same type as `lr`. Must be a scalar. + beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and m tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyAddSign", name, var, m, lr, alpha, sign_decay, + beta, grad, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_add_sign_eager_fallback( + var, m, lr, alpha, sign_decay, beta, grad, use_locking=use_locking, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Graph mode: register the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyAddSign", var=var, m=m, lr=lr, alpha=alpha,
                                sign_decay=sign_decay, beta=beta, grad=grad,
                                use_locking=use_locking, name=name)
  return _op
ResourceApplyAddSign = tf_export("raw_ops.ResourceApplyAddSign")(_ops.to_raw_op(resource_apply_add_sign))


def resource_apply_add_sign_eager_fallback(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyAddSign_T], alpha: Annotated[Any, TV_ResourceApplyAddSign_T], sign_decay: Annotated[Any, TV_ResourceApplyAddSign_T], beta: Annotated[Any, TV_ResourceApplyAddSign_T], grad: Annotated[Any, TV_ResourceApplyAddSign_T], use_locking: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_add_sign`.

  Invoked when the pywrap_tfe fast path raises `_FallbackException`; performs
  dtype matching and resource-handle conversion in Python, then executes the
  op through `_execute.execute`.
  """
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, alpha, sign_decay, beta, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (lr, alpha, sign_decay, beta, grad) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  _inputs_flat = [var, m, lr, alpha, sign_decay, beta, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  _result = _execute.execute(b"ResourceApplyAddSign", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyCenteredRMSProp_T = TypeVar("TV_ResourceApplyCenteredRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_centered_rms_prop(var: Annotated[Any, _atypes.Resource], mg: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], use_locking:bool=False, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second moment
  (i.e., the variance) for normalization, as opposed to regular RMSProp, which
  uses the (uncentered) second moment. This often helps with training, but is
  slightly more expensive in terms of computation and memory.

  Note that in dense implementation of this algorithm, mg, ms, and mom will
  update even if the grad is zero, but in this sparse implementation, mg, ms,
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient

  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

  mg <- rho * mg_{t-1} + (1-rho) * grad
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    mg: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum Scale. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyCenteredRMSProp", name, var, mg, ms, mom, lr, rho,
        momentum, epsilon, grad, "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_centered_rms_prop_eager_fallback(
          var, mg, ms, mom, lr, rho, momentum, epsilon, grad,
          use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Graph mode: register the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr,
                                        rho=rho, momentum=momentum,
                                        epsilon=epsilon, grad=grad,
                                        use_locking=use_locking, name=name)
  return _op
ResourceApplyCenteredRMSProp = tf_export("raw_ops.ResourceApplyCenteredRMSProp")(_ops.to_raw_op(resource_apply_centered_rms_prop))


def resource_apply_centered_rms_prop_eager_fallback(var: Annotated[Any, _atypes.Resource], mg: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ResourceApplyCenteredRMSProp_T], use_locking: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_centered_rms_prop`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (lr, rho, momentum, epsilon, grad) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  mg = _ops.convert_to_tensor(mg, _dtypes.resource)
  ms = _ops.convert_to_tensor(ms, _dtypes.resource)
  mom = _ops.convert_to_tensor(mom, _dtypes.resource)
  _inputs_flat = [var, mg, ms, mom, lr, rho, momentum, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  _result = _execute.execute(b"ResourceApplyCenteredRMSProp", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyFtrl_T = TypeVar("TV_ResourceApplyFtrl_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_ftrl(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyFtrl_T], lr: Annotated[Any, TV_ResourceApplyFtrl_T], l1: Annotated[Any, TV_ResourceApplyFtrl_T], l2: Annotated[Any, TV_ResourceApplyFtrl_T], lr_power: Annotated[Any, TV_ResourceApplyFtrl_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    multiply_linear_by_lr: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyFtrl", name, var, accum, linear, grad, lr, l1, l2,
        lr_power, "use_locking", use_locking, "multiply_linear_by_lr",
        multiply_linear_by_lr)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_ftrl_eager_fallback(
          var, accum, linear, grad, lr, l1, l2, lr_power,
          use_locking=use_locking,
          multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if multiply_linear_by_lr is None:
    multiply_linear_by_lr = False
  multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr")
  # Graph mode: register the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyFtrl", var=var, accum=accum, linear=linear, grad=grad,
                             lr=lr, l1=l1, l2=l2, lr_power=lr_power,
                             use_locking=use_locking,
                             multiply_linear_by_lr=multiply_linear_by_lr,
                             name=name)
  return _op
ResourceApplyFtrl = tf_export("raw_ops.ResourceApplyFtrl")(_ops.to_raw_op(resource_apply_ftrl))


def resource_apply_ftrl_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyFtrl_T], lr: Annotated[Any, TV_ResourceApplyFtrl_T], l1: Annotated[Any, TV_ResourceApplyFtrl_T], l2: Annotated[Any, TV_ResourceApplyFtrl_T], lr_power: Annotated[Any, TV_ResourceApplyFtrl_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_ftrl`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if multiply_linear_by_lr is None:
    multiply_linear_by_lr = False
  multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (grad, lr, l1, l2, lr_power) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _inputs_flat = [var, accum, linear, grad, lr, l1, l2, lr_power]
  _attrs = ("T", _attr_T, "use_locking", use_locking, "multiply_linear_by_lr",
  multiply_linear_by_lr)
  _result = _execute.execute(b"ResourceApplyFtrl", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyFtrlV2_T = TypeVar("TV_ResourceApplyFtrlV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_ftrl_v2(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyFtrlV2_T], lr: Annotated[Any, TV_ResourceApplyFtrlV2_T], l1: Annotated[Any, TV_ResourceApplyFtrlV2_T], l2: Annotated[Any, TV_ResourceApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ResourceApplyFtrlV2_T], lr_power: Annotated[Any, TV_ResourceApplyFtrlV2_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  grad_with_shrinkage = grad + 2 * l2_shrinkage * var
  linear += grad_with_shrinkage +
      (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    l2_shrinkage: A `Tensor`. Must have the same type as `grad`.
      L2 shrinkage regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    multiply_linear_by_lr: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyFtrlV2", name, var, accum, linear, grad, lr, l1,
        l2, l2_shrinkage, lr_power, "use_locking", use_locking,
        "multiply_linear_by_lr", multiply_linear_by_lr)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_ftrl_v2_eager_fallback(
          var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
          use_locking=use_locking,
          multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if multiply_linear_by_lr is None:
    multiply_linear_by_lr = False
  multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr")
  # Graph mode: register the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad,
                               lr=lr, l1=l1, l2=l2, l2_shrinkage=l2_shrinkage,
                               lr_power=lr_power, use_locking=use_locking,
                               multiply_linear_by_lr=multiply_linear_by_lr,
                               name=name)
  return _op
ResourceApplyFtrlV2 = tf_export("raw_ops.ResourceApplyFtrlV2")(_ops.to_raw_op(resource_apply_ftrl_v2))


def resource_apply_ftrl_v2_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceApplyFtrlV2_T], lr: Annotated[Any, TV_ResourceApplyFtrlV2_T], l1: Annotated[Any, TV_ResourceApplyFtrlV2_T], l2: Annotated[Any, TV_ResourceApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ResourceApplyFtrlV2_T], lr_power: Annotated[Any, TV_ResourceApplyFtrlV2_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_ftrl_v2`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if multiply_linear_by_lr is None:
    multiply_linear_by_lr = False
  multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, l2_shrinkage, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (grad, lr, l1, l2, l2_shrinkage, lr_power) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _inputs_flat = [var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power]
  _attrs = ("T", _attr_T, "use_locking", use_locking, "multiply_linear_by_lr",
  multiply_linear_by_lr)
  _result = _execute.execute(b"ResourceApplyFtrlV2", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyGradientDescent_T = TypeVar("TV_ResourceApplyGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_gradient_descent(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceApplyGradientDescent_T], delta: Annotated[Any, TV_ResourceApplyGradientDescent_T], use_locking:bool=False, name=None):
  r"""Update '*var' by subtracting 'alpha' * 'delta' from it.

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    delta: A `Tensor`. Must have the same type as `alpha`. The change.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyGradientDescent", name, var, alpha, delta,
        "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_gradient_descent_eager_fallback(
          var, alpha, delta, use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyGradientDescent", var=var, alpha=alpha, delta=delta,
                                        use_locking=use_locking, name=name)
  return _op
ResourceApplyGradientDescent = tf_export("raw_ops.ResourceApplyGradientDescent")(_ops.to_raw_op(resource_apply_gradient_descent))


def resource_apply_gradient_descent_eager_fallback(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceApplyGradientDescent_T], delta: Annotated[Any, TV_ResourceApplyGradientDescent_T], use_locking: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_gradient_descent`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, delta], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (alpha, delta) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _inputs_flat = [var, alpha, delta]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  _result = _execute.execute(b"ResourceApplyGradientDescent", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyKerasMomentum_T = TypeVar("TV_ResourceApplyKerasMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_keras_momentum(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyKerasMomentum_T], grad: Annotated[Any, TV_ResourceApplyKerasMomentum_T], momentum: Annotated[Any, TV_ResourceApplyKerasMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None):
  r"""Update '*var' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  accum = accum * momentum - lr * grad
  var += accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
+ If `True`, the tensor passed to compute grad will be + var + momentum * accum, so in the end, the var you get is actually + var + momentum * accum. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyKerasMomentum", name, var, accum, lr, grad, + momentum, "use_locking", use_locking, "use_nesterov", use_nesterov) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_keras_momentum_eager_fallback( + var, accum, lr, grad, momentum, use_locking=use_locking, + use_nesterov=use_nesterov, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyKerasMomentum", var=var, accum=accum, lr=lr, grad=grad, + momentum=momentum, + use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + return _op +ResourceApplyKerasMomentum = tf_export("raw_ops.ResourceApplyKerasMomentum")(_ops.to_raw_op(resource_apply_keras_momentum)) + + +def resource_apply_keras_momentum_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyKerasMomentum_T], grad: Annotated[Any, TV_ResourceApplyKerasMomentum_T], momentum: Annotated[Any, TV_ResourceApplyKerasMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, 
"use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, grad, momentum) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, grad, momentum] + _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov", + use_nesterov) + _result = _execute.execute(b"ResourceApplyKerasMomentum", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyMomentum_T = TypeVar("TV_ResourceApplyMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_momentum(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyMomentum_T], grad: Annotated[Any, TV_ResourceApplyMomentum_T], momentum: Annotated[Any, TV_ResourceApplyMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None): + r"""Update '*var' according to the momentum scheme. + + Set use_nesterov = True if you want to use Nesterov momentum. + + accum = accum * momentum + grad + var -= lr * accum + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). 
+ lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + momentum: A `Tensor`. Must have the same type as `lr`. + Momentum. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, the tensor passed to compute grad will be + var - lr * momentum * accum, so in the end, the var you get is actually + var - lr * momentum * accum. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyMomentum", name, var, accum, lr, grad, momentum, + "use_locking", use_locking, "use_nesterov", use_nesterov) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_momentum_eager_fallback( + var, accum, lr, grad, momentum, use_locking=use_locking, + use_nesterov=use_nesterov, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # Graph mode: register the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
                                 momentum=momentum, use_locking=use_locking,
                                 use_nesterov=use_nesterov, name=name)
  return _op
ResourceApplyMomentum = tf_export("raw_ops.ResourceApplyMomentum")(_ops.to_raw_op(resource_apply_momentum))


def resource_apply_momentum_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyMomentum_T], grad: Annotated[Any, TV_ResourceApplyMomentum_T], momentum: Annotated[Any, TV_ResourceApplyMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_momentum`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (lr, grad, momentum) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, grad, momentum]
  _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov",
  use_nesterov)
  _result = _execute.execute(b"ResourceApplyMomentum", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyPowerSign_T = TypeVar("TV_ResourceApplyPowerSign_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_power_sign(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyPowerSign_T], logbase: Annotated[Any, TV_ResourceApplyPowerSign_T], sign_decay: Annotated[Any, TV_ResourceApplyPowerSign_T], beta: Annotated[Any, TV_ResourceApplyPowerSign_T], grad: Annotated[Any, TV_ResourceApplyPowerSign_T], use_locking:bool=False, name=None):
  r"""Update '*var' according to the PowerSign update.

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
  variable <- variable - lr_t * update

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    logbase: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    sign_decay: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and m tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyPowerSign", name, var, m, lr, logbase, sign_decay,
        beta, grad, "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_power_sign_eager_fallback(
          var, m, lr, logbase, sign_decay, beta, grad,
          use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase,
                                  sign_decay=sign_decay, beta=beta, grad=grad,
                                  use_locking=use_locking, name=name)
  return _op
ResourceApplyPowerSign = tf_export("raw_ops.ResourceApplyPowerSign")(_ops.to_raw_op(resource_apply_power_sign))


def resource_apply_power_sign_eager_fallback(var: Annotated[Any, _atypes.Resource], m: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyPowerSign_T], logbase: Annotated[Any, TV_ResourceApplyPowerSign_T], sign_decay: Annotated[Any, TV_ResourceApplyPowerSign_T], beta: Annotated[Any, TV_ResourceApplyPowerSign_T], grad: Annotated[Any, TV_ResourceApplyPowerSign_T], use_locking: bool, name, ctx):
  """Eager-mode fallback for `resource_apply_power_sign`."""
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, logbase, sign_decay, beta, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (lr, logbase, sign_decay, beta, grad) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  _inputs_flat = [var, m, lr, logbase, sign_decay, beta, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  _result = _execute.execute(b"ResourceApplyPowerSign", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  _result = None  # The op has zero outputs; callers always get None.
  return _result


TV_ResourceApplyProximalAdagrad_T = TypeVar("TV_ResourceApplyProximalAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

def resource_apply_proximal_adagrad(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], l1: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], l2: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], grad: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], use_locking:bool=False, name=None):
  r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

  accum += grad * grad
  prox_v = var - lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `lr`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `lr`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C API when eager.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceApplyProximalAdagrad", name, var, accum, lr, l1, l2,
        grad, "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_apply_proximal_adagrad_eager_fallback(
          var, accum, lr, l1, l2, grad, use_locking=use_locking, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, + l2=l2, grad=grad, + use_locking=use_locking, name=name) + return _op +ResourceApplyProximalAdagrad = tf_export("raw_ops.ResourceApplyProximalAdagrad")(_ops.to_raw_op(resource_apply_proximal_adagrad)) + + +def resource_apply_proximal_adagrad_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], l1: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], l2: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], grad: Annotated[Any, TV_ResourceApplyProximalAdagrad_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, l1, l2, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, l1, l2, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyProximalAdagrad", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyProximalGradientDescent_T = TypeVar("TV_ResourceApplyProximalGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, 
_atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_proximal_gradient_descent(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], delta: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], use_locking:bool=False, name=None): + r"""Update '*var' as FOBOS algorithm with fixed learning rate. + + prox_v = var - alpha * delta + var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `alpha`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `alpha`. + L2 regularization. Must be a scalar. + delta: A `Tensor`. Must have the same type as `alpha`. The change. + use_locking: An optional `bool`. Defaults to `False`. + If True, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyProximalGradientDescent", name, var, alpha, l1, + l2, delta, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_proximal_gradient_descent_eager_fallback( + var, alpha, l1, l2, delta, use_locking=use_locking, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, + l2=l2, delta=delta, + use_locking=use_locking, + name=name) + return _op +ResourceApplyProximalGradientDescent = tf_export("raw_ops.ResourceApplyProximalGradientDescent")(_ops.to_raw_op(resource_apply_proximal_gradient_descent)) + + +def resource_apply_proximal_gradient_descent_eager_fallback(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], delta: Annotated[Any, TV_ResourceApplyProximalGradientDescent_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, delta], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, 
_dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (alpha, l1, l2, delta) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + _inputs_flat = [var, alpha, l1, l2, delta] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyProximalGradientDescent", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceApplyRMSProp_T = TypeVar("TV_ResourceApplyRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def resource_apply_rms_prop(var: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyRMSProp_T], rho: Annotated[Any, TV_ResourceApplyRMSProp_T], momentum: Annotated[Any, TV_ResourceApplyRMSProp_T], epsilon: Annotated[Any, TV_ResourceApplyRMSProp_T], grad: Annotated[Any, TV_ResourceApplyRMSProp_T], use_locking:bool=False, name=None): + r"""Update '*var' according to the RMSProp algorithm. + + Note that in dense implementation of this algorithm, ms and mom will + update even if the grad is zero, but in this sparse implementation, ms + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + var <- var - mom + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + ms: A `Tensor` of type `resource`. Should be from a Variable(). + mom: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `lr`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `lr`. + epsilon: A `Tensor`. Must have the same type as `lr`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, ms, and mom tensors is protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceApplyRMSProp", name, var, ms, mom, lr, rho, momentum, + epsilon, grad, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_apply_rms_prop_eager_fallback( + var, ms, mom, lr, rho, momentum, epsilon, grad, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, + momentum=momentum, epsilon=epsilon, grad=grad, + use_locking=use_locking, name=name) + return _op +ResourceApplyRMSProp = tf_export("raw_ops.ResourceApplyRMSProp")(_ops.to_raw_op(resource_apply_rms_prop)) + + +def resource_apply_rms_prop_eager_fallback(var: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceApplyRMSProp_T], rho: Annotated[Any, TV_ResourceApplyRMSProp_T], momentum: Annotated[Any, TV_ResourceApplyRMSProp_T], epsilon: Annotated[Any, TV_ResourceApplyRMSProp_T], grad: Annotated[Any, TV_ResourceApplyRMSProp_T], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, rho, momentum, epsilon, grad) = _inputs_T + var = _ops.convert_to_tensor(var, _dtypes.resource) + ms = _ops.convert_to_tensor(ms, _dtypes.resource) + mom = _ops.convert_to_tensor(mom, _dtypes.resource) + _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad] + _attrs = ("T", _attr_T, "use_locking", use_locking) + _result = _execute.execute(b"ResourceApplyRMSProp", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_ResourceSparseApplyAdadelta_T = TypeVar("TV_ResourceSparseApplyAdadelta_T", _atypes.BFloat16, 
_atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyAdadelta_Tindices = TypeVar("TV_ResourceSparseApplyAdadelta_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_adadelta(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], accum_update: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], rho: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], epsilon: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], grad: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], indices: Annotated[Any, TV_ResourceSparseApplyAdadelta_Tindices], use_locking:bool=False, name=None): + r"""var: Should be from a Variable(). + + Args: + var: A `Tensor` of type `resource`. + accum: A `Tensor` of type `resource`. Should be from a Variable(). + accum_update: A `Tensor` of type `resource`. + : Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + rho: A `Tensor`. Must have the same type as `lr`. + Decay factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `lr`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. 
+ If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyAdadelta", name, var, accum, accum_update, + lr, rho, epsilon, grad, indices, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_adadelta_eager_fallback( + var, accum, accum_update, lr, rho, epsilon, grad, indices, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyAdadelta", var=var, accum=accum, + accum_update=accum_update, lr=lr, + rho=rho, epsilon=epsilon, grad=grad, + indices=indices, + use_locking=use_locking, name=name) + return _op +ResourceSparseApplyAdadelta = tf_export("raw_ops.ResourceSparseApplyAdadelta")(_ops.to_raw_op(resource_sparse_apply_adadelta)) + + +def resource_sparse_apply_adadelta_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], accum_update: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], rho: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], epsilon: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], grad: Annotated[Any, TV_ResourceSparseApplyAdadelta_T], indices: Annotated[Any, TV_ResourceSparseApplyAdadelta_Tindices], use_locking: bool, name, ctx): + if use_locking is None: + 
use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, rho, epsilon, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource) + _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad, indices] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyAdadelta", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyAdagrad_T = TypeVar("TV_ResourceSparseApplyAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyAdagrad_Tindices = TypeVar("TV_ResourceSparseApplyAdagrad_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_adagrad(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdagrad_T], grad: Annotated[Any, TV_ResourceSparseApplyAdagrad_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagrad_Tindices], use_locking:bool=False, update_slots:bool=True, name=None): + 
r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + + That is for rows we have grad for, we update var and accum as follows: + accum += grad * grad + var -= lr * grad * (1 / sqrt(accum)) + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyAdagrad", name, var, accum, lr, grad, + indices, "use_locking", use_locking, "update_slots", update_slots) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_adagrad_eager_fallback( + var, accum, lr, grad, indices, use_locking=use_locking, + update_slots=update_slots, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, + indices=indices, + use_locking=use_locking, + update_slots=update_slots, name=name) + return _op +ResourceSparseApplyAdagrad = tf_export("raw_ops.ResourceSparseApplyAdagrad")(_ops.to_raw_op(resource_sparse_apply_adagrad)) + + +def resource_sparse_apply_adagrad_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdagrad_T], grad: Annotated[Any, TV_ResourceSparseApplyAdagrad_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagrad_Tindices], use_locking: bool, update_slots: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, grad, indices] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking, "update_slots", update_slots) + _result = 
_execute.execute(b"ResourceSparseApplyAdagrad", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyAdagradDA_T = TypeVar("TV_ResourceSparseApplyAdagradDA_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyAdagradDA_Tindices = TypeVar("TV_ResourceSparseApplyAdagradDA_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_adagrad_da(var: Annotated[Any, _atypes.Resource], gradient_accumulator: Annotated[Any, _atypes.Resource], gradient_squared_accumulator: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagradDA_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], l1: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], l2: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking:bool=False, name=None): + r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + gradient_accumulator: A `Tensor` of type `resource`. + Should be from a Variable(). + gradient_squared_accumulator: A `Tensor` of type `resource`. + Should be from a Variable(). + grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. 
Must have the same type as `grad`. + Learning rate. Must be a scalar. + l1: A `Tensor`. Must have the same type as `grad`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `grad`. + L2 regularization. Must be a scalar. + global_step: A `Tensor` of type `int64`. + Training step number. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyAdagradDA", name, var, gradient_accumulator, + gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, + "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_adagrad_da_eager_fallback( + var, gradient_accumulator, gradient_squared_accumulator, grad, + indices, lr, l1, l2, global_step, use_locking=use_locking, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyAdagradDA", var=var, + gradient_accumulator=gradient_accumulator, + gradient_squared_accumulator=gradient_squared_accumulator, + grad=grad, indices=indices, lr=lr, + l1=l1, l2=l2, global_step=global_step, + use_locking=use_locking, name=name) + return _op +ResourceSparseApplyAdagradDA = tf_export("raw_ops.ResourceSparseApplyAdagradDA")(_ops.to_raw_op(resource_sparse_apply_adagrad_da)) + + +def resource_sparse_apply_adagrad_da_eager_fallback(var: Annotated[Any, _atypes.Resource], gradient_accumulator: Annotated[Any, _atypes.Resource], gradient_squared_accumulator: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagradDA_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], l1: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], l2: Annotated[Any, TV_ResourceSparseApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (grad, lr, l1, l2) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource) + gradient_squared_accumulator = 
_ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource) + global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) + _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyAdagradDA", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyAdagradV2_T = TypeVar("TV_ResourceSparseApplyAdagradV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyAdagradV2_Tindices = TypeVar("TV_ResourceSparseApplyAdagradV2_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_adagrad_v2(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], epsilon: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], grad: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagradV2_Tindices], use_locking:bool=False, update_slots:bool=True, name=None): + r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + + That is for rows we have grad for, we update var and accum as follows: + accum += grad * grad + var -= lr * grad * (1 / sqrt(accum)) + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `lr`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyAdagradV2", name, var, accum, lr, epsilon, + grad, indices, "use_locking", use_locking, "update_slots", + update_slots) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_adagrad_v2_eager_fallback( + var, accum, lr, epsilon, grad, indices, use_locking=use_locking, + update_slots=update_slots, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyAdagradV2", var=var, accum=accum, lr=lr, + epsilon=epsilon, grad=grad, + indices=indices, + use_locking=use_locking, + update_slots=update_slots, name=name) + return _op +ResourceSparseApplyAdagradV2 = tf_export("raw_ops.ResourceSparseApplyAdagradV2")(_ops.to_raw_op(resource_sparse_apply_adagrad_v2)) + + +def resource_sparse_apply_adagrad_v2_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], epsilon: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], grad: Annotated[Any, TV_ResourceSparseApplyAdagradV2_T], indices: Annotated[Any, TV_ResourceSparseApplyAdagradV2_Tindices], use_locking: bool, update_slots: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, epsilon, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, epsilon, grad, indices] + _attrs = ("T", _attr_T, 
"Tindices", _attr_Tindices, "use_locking", + use_locking, "update_slots", update_slots) + _result = _execute.execute(b"ResourceSparseApplyAdagradV2", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyCenteredRMSProp_T = TypeVar("TV_ResourceSparseApplyCenteredRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyCenteredRMSProp_Tindices = TypeVar("TV_ResourceSparseApplyCenteredRMSProp_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_centered_rms_prop(var: Annotated[Any, _atypes.Resource], mg: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], indices: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_Tindices], use_locking:bool=False, name=None): + r"""Update '*var' according to the centered RMSProp algorithm. + + The centered RMSProp algorithm uses an estimate of the centered second moment + (i.e., the variance) for normalization, as opposed to regular RMSProp, which + uses the (uncentered) second moment. This often helps with training, but is + slightly more expensive in terms of computation and memory. 
+ + Note that in dense implementation of this algorithm, mg, ms, and mom will + update even if the grad is zero, but in this sparse implementation, mg, ms, + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + mean_grad = decay * mean_grad + (1-decay) * gradient + Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + var <- var - mom + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + mg: A `Tensor` of type `resource`. Should be from a Variable(). + ms: A `Tensor` of type `resource`. Should be from a Variable(). + mom: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `lr`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `lr`. + epsilon: A `Tensor`. Must have the same type as `lr`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var, ms and mom. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, mg, ms, and mom tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyCenteredRMSProp", name, var, mg, ms, mom, + lr, rho, momentum, epsilon, grad, indices, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_centered_rms_prop_eager_fallback( + var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, + lr=lr, rho=rho, + momentum=momentum, + epsilon=epsilon, grad=grad, + indices=indices, + use_locking=use_locking, + name=name) + return _op +ResourceSparseApplyCenteredRMSProp = tf_export("raw_ops.ResourceSparseApplyCenteredRMSProp")(_ops.to_raw_op(resource_sparse_apply_centered_rms_prop)) + + +def resource_sparse_apply_centered_rms_prop_eager_fallback(var: Annotated[Any, _atypes.Resource], mg: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], rho: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], grad: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_T], indices: Annotated[Any, TV_ResourceSparseApplyCenteredRMSProp_Tindices], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = 
_execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, rho, momentum, epsilon, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + mg = _ops.convert_to_tensor(mg, _dtypes.resource) + ms = _ops.convert_to_tensor(ms, _dtypes.resource) + mom = _ops.convert_to_tensor(mom, _dtypes.resource) + _inputs_flat = [var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyCenteredRMSProp", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyFtrl_T = TypeVar("TV_ResourceSparseApplyFtrl_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyFtrl_Tindices = TypeVar("TV_ResourceSparseApplyFtrl_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_ftrl(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyFtrl_T], indices: Annotated[Any, TV_ResourceSparseApplyFtrl_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyFtrl_T], l1: Annotated[Any, 
TV_ResourceSparseApplyFtrl_T], l2: Annotated[Any, TV_ResourceSparseApplyFtrl_T], lr_power: Annotated[Any, TV_ResourceSparseApplyFtrl_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None): + r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. + + That is for rows we have grad for, we update var, accum and linear as follows: + accum_new = accum + grad * grad + linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + linear: A `Tensor` of type `resource`. Should be from a Variable(). + grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. Must have the same type as `grad`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `grad`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `grad`. + L2 regularization. Must be a scalar. + lr_power: A `Tensor`. Must have the same type as `grad`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyFtrl", name, var, accum, linear, grad, + indices, lr, l1, l2, lr_power, "use_locking", use_locking, + "multiply_linear_by_lr", multiply_linear_by_lr) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_ftrl_eager_fallback( + var, accum, linear, grad, indices, lr, l1, l2, lr_power, + use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyFtrl", var=var, accum=accum, linear=linear, + grad=grad, indices=indices, lr=lr, l1=l1, + l2=l2, lr_power=lr_power, + use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, + name=name) + return _op +ResourceSparseApplyFtrl = tf_export("raw_ops.ResourceSparseApplyFtrl")(_ops.to_raw_op(resource_sparse_apply_ftrl)) + + +def resource_sparse_apply_ftrl_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyFtrl_T], indices: Annotated[Any, TV_ResourceSparseApplyFtrl_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyFtrl_T], l1: Annotated[Any, TV_ResourceSparseApplyFtrl_T], l2: Annotated[Any, TV_ResourceSparseApplyFtrl_T], lr_power: Annotated[Any, TV_ResourceSparseApplyFtrl_T], 
use_locking: bool, multiply_linear_by_lr: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (grad, lr, l1, l2, lr_power) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + linear = _ops.convert_to_tensor(linear, _dtypes.resource) + _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, lr_power] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) + _result = _execute.execute(b"ResourceSparseApplyFtrl", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyFtrlV2_T = TypeVar("TV_ResourceSparseApplyFtrlV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyFtrlV2_Tindices = TypeVar("TV_ResourceSparseApplyFtrlV2_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_ftrl_v2(var: Annotated[Any, 
_atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], indices: Annotated[Any, TV_ResourceSparseApplyFtrlV2_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l1: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l2: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], lr_power: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None): + r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. + + That is for rows we have grad for, we update var, accum and linear as follows: + grad_with_shrinkage = grad + 2 * l2_shrinkage * var + accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + linear += grad_with_shrinkage + + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + linear: A `Tensor` of type `resource`. Should be from a Variable(). + grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. Must have the same type as `grad`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `grad`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `grad`. + L2 shrinkage regularization. Must be a scalar. 
+ l2_shrinkage: A `Tensor`. Must have the same type as `grad`. + lr_power: A `Tensor`. Must have the same type as `grad`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyFtrlV2", name, var, accum, linear, grad, + indices, lr, l1, l2, l2_shrinkage, lr_power, "use_locking", + use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_ftrl_v2_eager_fallback( + var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, + lr_power, use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyFtrlV2", var=var, accum=accum, linear=linear, + grad=grad, indices=indices, lr=lr, l1=l1, + l2=l2, l2_shrinkage=l2_shrinkage, + lr_power=lr_power, + use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, + name=name) + return _op +ResourceSparseApplyFtrlV2 = tf_export("raw_ops.ResourceSparseApplyFtrlV2")(_ops.to_raw_op(resource_sparse_apply_ftrl_v2)) + + +def resource_sparse_apply_ftrl_v2_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], linear: Annotated[Any, _atypes.Resource], grad: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], indices: Annotated[Any, TV_ResourceSparseApplyFtrlV2_Tindices], lr: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l1: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l2: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], lr_power: Annotated[Any, TV_ResourceSparseApplyFtrlV2_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, l2_shrinkage, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, 
_dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (grad, lr, l1, l2, l2_shrinkage, lr_power) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + linear = _ops.convert_to_tensor(linear, _dtypes.resource) + _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) + _result = _execute.execute(b"ResourceSparseApplyFtrlV2", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyKerasMomentum_T = TypeVar("TV_ResourceSparseApplyKerasMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyKerasMomentum_Tindices = TypeVar("TV_ResourceSparseApplyKerasMomentum_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_keras_momentum(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], grad: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], indices: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_Tindices], momentum: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None): + r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme. + + Set use_nesterov = True if you want to use Nesterov momentum. 
+ + That is for rows we have grad for, we update var and accum as follows: + + accum = accum * momentum - lr * grad + var += accum + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + momentum: A `Tensor`. Must have the same type as `lr`. + Momentum. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, the tensor passed to compute grad will be + var + momentum * accum, so in the end, the var you get is actually + var + momentum * accum. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyKerasMomentum", name, var, accum, lr, grad, + indices, momentum, "use_locking", use_locking, "use_nesterov", + use_nesterov) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_keras_momentum_eager_fallback( + var, accum, lr, grad, indices, momentum, use_locking=use_locking, + use_nesterov=use_nesterov, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyKerasMomentum", var=var, accum=accum, lr=lr, + grad=grad, indices=indices, + momentum=momentum, + use_locking=use_locking, + use_nesterov=use_nesterov, + name=name) + return _op +ResourceSparseApplyKerasMomentum = tf_export("raw_ops.ResourceSparseApplyKerasMomentum")(_ops.to_raw_op(resource_sparse_apply_keras_momentum)) + + +def resource_sparse_apply_keras_momentum_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], grad: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], indices: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_Tindices], momentum: Annotated[Any, TV_ResourceSparseApplyKerasMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False 
+ use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, grad, momentum) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, grad, indices, momentum] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking, "use_nesterov", use_nesterov) + _result = _execute.execute(b"ResourceSparseApplyKerasMomentum", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyMomentum_T = TypeVar("TV_ResourceSparseApplyMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyMomentum_Tindices = TypeVar("TV_ResourceSparseApplyMomentum_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_momentum(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyMomentum_T], grad: Annotated[Any, TV_ResourceSparseApplyMomentum_T], indices: Annotated[Any, TV_ResourceSparseApplyMomentum_Tindices], momentum: Annotated[Any, TV_ResourceSparseApplyMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None): + r"""Update 
relevant entries in '*var' and '*accum' according to the momentum scheme. + + Set use_nesterov = True if you want to use Nesterov momentum. + + That is for rows we have grad for, we update var and accum as follows: + + accum = accum * momentum + grad + var -= lr * accum + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + momentum: A `Tensor`. Must have the same type as `lr`. + Momentum. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, the tensor passed to compute grad will be + var - lr * momentum * accum, so in the end, the var you get is actually + var - lr * momentum * accum. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyMomentum", name, var, accum, lr, grad, + indices, momentum, "use_locking", use_locking, "use_nesterov", + use_nesterov) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_momentum_eager_fallback( + var, accum, lr, grad, indices, momentum, use_locking=use_locking, + use_nesterov=use_nesterov, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, + indices=indices, momentum=momentum, + use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + return _op +ResourceSparseApplyMomentum = tf_export("raw_ops.ResourceSparseApplyMomentum")(_ops.to_raw_op(resource_sparse_apply_momentum)) + + +def resource_sparse_apply_momentum_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyMomentum_T], grad: Annotated[Any, TV_ResourceSparseApplyMomentum_T], indices: Annotated[Any, TV_ResourceSparseApplyMomentum_Tindices], momentum: Annotated[Any, TV_ResourceSparseApplyMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, 
"use_nesterov") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, grad, momentum) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, grad, indices, momentum] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking, "use_nesterov", use_nesterov) + _result = _execute.execute(b"ResourceSparseApplyMomentum", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyProximalAdagrad_T = TypeVar("TV_ResourceSparseApplyProximalAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyProximalAdagrad_Tindices = TypeVar("TV_ResourceSparseApplyProximalAdagrad_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_proximal_adagrad(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], l1: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], l2: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], grad: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], indices: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_Tindices], 
use_locking:bool=False, name=None): + r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + + That is for rows we have grad for, we update var and accum as follows: + accum += grad * grad + prox_v = var + prox_v -= lr * grad * (1 / sqrt(accum)) + var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + accum: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Learning rate. Must be a scalar. + l1: A `Tensor`. Must have the same type as `lr`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `lr`. + L2 regularization. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyProximalAdagrad", name, var, accum, lr, l1, + l2, grad, indices, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_proximal_adagrad_eager_fallback( + var, accum, lr, l1, l2, grad, indices, use_locking=use_locking, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, + l1=l1, l2=l2, grad=grad, + indices=indices, + use_locking=use_locking, + name=name) + return _op +ResourceSparseApplyProximalAdagrad = tf_export("raw_ops.ResourceSparseApplyProximalAdagrad")(_ops.to_raw_op(resource_sparse_apply_proximal_adagrad)) + + +def resource_sparse_apply_proximal_adagrad_eager_fallback(var: Annotated[Any, _atypes.Resource], accum: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], l1: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], l2: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], grad: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_T], indices: Annotated[Any, TV_ResourceSparseApplyProximalAdagrad_Tindices], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, 
_dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, l1, l2, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + accum = _ops.convert_to_tensor(accum, _dtypes.resource) + _inputs_flat = [var, accum, lr, l1, l2, grad, indices] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyProximalAdagrad", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_ResourceSparseApplyProximalGradientDescent_T = TypeVar("TV_ResourceSparseApplyProximalGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyProximalGradientDescent_Tindices = TypeVar("TV_ResourceSparseApplyProximalGradientDescent_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_proximal_gradient_descent(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], grad: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], indices: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_Tindices], use_locking:bool=False, name=None): + r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate. 
+ + That is for rows we have grad for, we update var as follows: + prox_v = var - alpha * grad + var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `alpha`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `alpha`. + L2 regularization. Must be a scalar. + grad: A `Tensor`. Must have the same type as `alpha`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If True, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyProximalGradientDescent", name, var, alpha, + l1, l2, grad, indices, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_proximal_gradient_descent_eager_fallback( + var, alpha, l1, l2, grad, indices, use_locking=use_locking, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyProximalGradientDescent", var=var, alpha=alpha, + l1=l1, l2=l2, grad=grad, + indices=indices, + use_locking=use_locking, + name=name) + return _op +ResourceSparseApplyProximalGradientDescent = tf_export("raw_ops.ResourceSparseApplyProximalGradientDescent")(_ops.to_raw_op(resource_sparse_apply_proximal_gradient_descent)) + + +def resource_sparse_apply_proximal_gradient_descent_eager_fallback(var: Annotated[Any, _atypes.Resource], alpha: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], l1: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], l2: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], grad: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_T], indices: Annotated[Any, TV_ResourceSparseApplyProximalGradientDescent_Tindices], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (alpha, l1, l2, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + _inputs_flat = [var, alpha, l1, l2, grad, indices] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyProximalGradientDescent", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) 
+ _result = None + return _result + + +TV_ResourceSparseApplyRMSProp_T = TypeVar("TV_ResourceSparseApplyRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ResourceSparseApplyRMSProp_Tindices = TypeVar("TV_ResourceSparseApplyRMSProp_Tindices", _atypes.Int32, _atypes.Int64) + +def resource_sparse_apply_rms_prop(var: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], rho: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], momentum: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], epsilon: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], grad: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], indices: Annotated[Any, TV_ResourceSparseApplyRMSProp_Tindices], use_locking:bool=False, name=None): + r"""Update '*var' according to the RMSProp algorithm. + + Note that in dense implementation of this algorithm, ms and mom will + update even if the grad is zero, but in this sparse implementation, ms + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + var <- var - mom + + Args: + var: A `Tensor` of type `resource`. Should be from a Variable(). + ms: A `Tensor` of type `resource`. Should be from a Variable(). + mom: A `Tensor` of type `resource`. Should be from a Variable(). + lr: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `lr`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `lr`. + epsilon: A `Tensor`. Must have the same type as `lr`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `lr`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var, ms and mom. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, ms, and mom tensors is protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ResourceSparseApplyRMSProp", name, var, ms, mom, lr, rho, + momentum, epsilon, grad, indices, "use_locking", use_locking) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return resource_sparse_apply_rms_prop_eager_fallback( + var, ms, mom, lr, rho, momentum, epsilon, grad, indices, + use_locking=use_locking, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ResourceSparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, + momentum=momentum, epsilon=epsilon, + grad=grad, indices=indices, + use_locking=use_locking, name=name) + return _op +ResourceSparseApplyRMSProp = tf_export("raw_ops.ResourceSparseApplyRMSProp")(_ops.to_raw_op(resource_sparse_apply_rms_prop)) + + +def resource_sparse_apply_rms_prop_eager_fallback(var: Annotated[Any, _atypes.Resource], ms: Annotated[Any, _atypes.Resource], mom: Annotated[Any, _atypes.Resource], lr: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], rho: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], momentum: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], epsilon: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], grad: Annotated[Any, TV_ResourceSparseApplyRMSProp_T], indices: Annotated[Any, TV_ResourceSparseApplyRMSProp_Tindices], use_locking: bool, name, ctx): + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lr, rho, momentum, epsilon, grad) = _inputs_T + _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) + var = _ops.convert_to_tensor(var, _dtypes.resource) + ms = _ops.convert_to_tensor(ms, _dtypes.resource) + mom = _ops.convert_to_tensor(mom, _dtypes.resource) + _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad, indices] + _attrs = ("T", _attr_T, "Tindices", 
_attr_Tindices, "use_locking", + use_locking) + _result = _execute.execute(b"ResourceSparseApplyRMSProp", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_SparseApplyAdadelta_T = TypeVar("TV_SparseApplyAdadelta_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyAdadelta_Tindices = TypeVar("TV_SparseApplyAdadelta_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_adadelta(var: Annotated[Any, TV_SparseApplyAdadelta_T], accum: Annotated[Any, TV_SparseApplyAdadelta_T], accum_update: Annotated[Any, TV_SparseApplyAdadelta_T], lr: Annotated[Any, TV_SparseApplyAdadelta_T], rho: Annotated[Any, TV_SparseApplyAdadelta_T], epsilon: Annotated[Any, TV_SparseApplyAdadelta_T], grad: Annotated[Any, TV_SparseApplyAdadelta_T], indices: Annotated[Any, TV_SparseApplyAdadelta_Tindices], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyAdadelta_T]: + r"""var: Should be from a Variable(). + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + accum_update: A mutable `Tensor`. Must have the same type as `var`. + : Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Learning rate. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay factor. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Constant factor. Must be a scalar. + grad: A `Tensor`. 
Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyAdadelta", var=var, accum=accum, + accum_update=accum_update, lr=lr, rho=rho, + epsilon=epsilon, grad=grad, indices=indices, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyAdadelta", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyAdadelta = tf_export("raw_ops.SparseApplyAdadelta")(_ops.to_raw_op(sparse_apply_adadelta)) + + +def sparse_apply_adadelta_eager_fallback(var: Annotated[Any, TV_SparseApplyAdadelta_T], accum: Annotated[Any, TV_SparseApplyAdadelta_T], accum_update: Annotated[Any, TV_SparseApplyAdadelta_T], lr: Annotated[Any, TV_SparseApplyAdadelta_T], rho: Annotated[Any, TV_SparseApplyAdadelta_T], epsilon: Annotated[Any, TV_SparseApplyAdadelta_T], grad: Annotated[Any, TV_SparseApplyAdadelta_T], indices: Annotated[Any, 
TV_SparseApplyAdadelta_Tindices], use_locking: bool, name, ctx) -> Annotated[Any, TV_SparseApplyAdadelta_T]: + raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.") + +TV_SparseApplyAdagrad_T = TypeVar("TV_SparseApplyAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyAdagrad_Tindices = TypeVar("TV_SparseApplyAdagrad_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_adagrad(var: Annotated[Any, TV_SparseApplyAdagrad_T], accum: Annotated[Any, TV_SparseApplyAdagrad_T], lr: Annotated[Any, TV_SparseApplyAdagrad_T], grad: Annotated[Any, TV_SparseApplyAdagrad_T], indices: Annotated[Any, TV_SparseApplyAdagrad_Tindices], use_locking:bool=False, update_slots:bool=True, name=None) -> Annotated[Any, TV_SparseApplyAdagrad_T]: + r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + + That is for rows we have grad for, we update var and accum as follows: + $$accum += grad * grad$$ + $$var -= lr * grad * (1 / sqrt(accum))$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Learning rate. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. 
+ use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_adagrad op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, + indices=indices, use_locking=use_locking, + update_slots=update_slots, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking"), "update_slots", + _op._get_attr_bool("update_slots")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyAdagrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyAdagrad = tf_export("raw_ops.SparseApplyAdagrad")(_ops.to_raw_op(sparse_apply_adagrad)) + + +def sparse_apply_adagrad_eager_fallback(var: Annotated[Any, TV_SparseApplyAdagrad_T], accum: Annotated[Any, TV_SparseApplyAdagrad_T], lr: Annotated[Any, TV_SparseApplyAdagrad_T], grad: Annotated[Any, TV_SparseApplyAdagrad_T], indices: Annotated[Any, TV_SparseApplyAdagrad_Tindices], use_locking: bool, update_slots: bool, name, ctx) -> Annotated[Any, TV_SparseApplyAdagrad_T]: + raise RuntimeError("sparse_apply_adagrad op does not 
support eager execution. Arg 'out' is a ref.") + +TV_SparseApplyAdagradDA_T = TypeVar("TV_SparseApplyAdagradDA_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyAdagradDA_Tindices = TypeVar("TV_SparseApplyAdagradDA_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_adagrad_da(var: Annotated[Any, TV_SparseApplyAdagradDA_T], gradient_accumulator: Annotated[Any, TV_SparseApplyAdagradDA_T], gradient_squared_accumulator: Annotated[Any, TV_SparseApplyAdagradDA_T], grad: Annotated[Any, TV_SparseApplyAdagradDA_T], indices: Annotated[Any, TV_SparseApplyAdagradDA_Tindices], lr: Annotated[Any, TV_SparseApplyAdagradDA_T], l1: Annotated[Any, TV_SparseApplyAdagradDA_T], l2: Annotated[Any, TV_SparseApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyAdagradDA_T]: + r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. Must have the same type as `var`. 
+ Learning rate. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + global_step: A `Tensor` of type `int64`. + Training step number. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyAdagradDA", var=var, + gradient_accumulator=gradient_accumulator, + gradient_squared_accumulator=gradient_squared_accumulator, + grad=grad, indices=indices, lr=lr, l1=l1, + l2=l2, global_step=global_step, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyAdagradDA", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyAdagradDA = tf_export("raw_ops.SparseApplyAdagradDA")(_ops.to_raw_op(sparse_apply_adagrad_da)) + + +def sparse_apply_adagrad_da_eager_fallback(var: Annotated[Any, TV_SparseApplyAdagradDA_T], gradient_accumulator: Annotated[Any, TV_SparseApplyAdagradDA_T], gradient_squared_accumulator: Annotated[Any, TV_SparseApplyAdagradDA_T], grad: 
Annotated[Any, TV_SparseApplyAdagradDA_T], indices: Annotated[Any, TV_SparseApplyAdagradDA_Tindices], lr: Annotated[Any, TV_SparseApplyAdagradDA_T], l1: Annotated[Any, TV_SparseApplyAdagradDA_T], l2: Annotated[Any, TV_SparseApplyAdagradDA_T], global_step: Annotated[Any, _atypes.Int64], use_locking: bool, name, ctx) -> Annotated[Any, TV_SparseApplyAdagradDA_T]: + raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") + +TV_SparseApplyAdagradV2_T = TypeVar("TV_SparseApplyAdagradV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyAdagradV2_Tindices = TypeVar("TV_SparseApplyAdagradV2_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_adagrad_v2(var: Annotated[Any, TV_SparseApplyAdagradV2_T], accum: Annotated[Any, TV_SparseApplyAdagradV2_T], lr: Annotated[Any, TV_SparseApplyAdagradV2_T], epsilon: Annotated[Any, TV_SparseApplyAdagradV2_T], grad: Annotated[Any, TV_SparseApplyAdagradV2_T], indices: Annotated[Any, TV_SparseApplyAdagradV2_Tindices], use_locking:bool=False, update_slots:bool=True, name=None) -> Annotated[Any, TV_SparseApplyAdagradV2_T]: + r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + + That is for rows we have grad for, we update var and accum as follows: + $$accum += grad * grad$$ + $$var -= lr * grad * (1 / sqrt(accum))$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. 
+ Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Learning rate. Must be a scalar. + epsilon: A `Tensor`. Must have the same type as `var`. + Constant factor. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + update_slots: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if update_slots is None: + update_slots = True + update_slots = _execute.make_bool(update_slots, "update_slots") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyAdagradV2", var=var, accum=accum, lr=lr, epsilon=epsilon, + grad=grad, indices=indices, + use_locking=use_locking, + update_slots=update_slots, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking"), "update_slots", + _op._get_attr_bool("update_slots")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyAdagradV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyAdagradV2 = tf_export("raw_ops.SparseApplyAdagradV2")(_ops.to_raw_op(sparse_apply_adagrad_v2)) + + +def sparse_apply_adagrad_v2_eager_fallback(var: Annotated[Any, TV_SparseApplyAdagradV2_T], accum: Annotated[Any, TV_SparseApplyAdagradV2_T], lr: Annotated[Any, TV_SparseApplyAdagradV2_T], epsilon: Annotated[Any, TV_SparseApplyAdagradV2_T], grad: Annotated[Any, TV_SparseApplyAdagradV2_T], indices: Annotated[Any, TV_SparseApplyAdagradV2_Tindices], use_locking: bool, update_slots: bool, name, ctx) -> Annotated[Any, TV_SparseApplyAdagradV2_T]: + raise RuntimeError("sparse_apply_adagrad_v2 op does not support eager execution. 
Arg 'out' is a ref.") + +TV_SparseApplyCenteredRMSProp_T = TypeVar("TV_SparseApplyCenteredRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyCenteredRMSProp_Tindices = TypeVar("TV_SparseApplyCenteredRMSProp_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_centered_rms_prop(var: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], mg: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], ms: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], mom: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], lr: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], rho: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], grad: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], indices: Annotated[Any, TV_SparseApplyCenteredRMSProp_Tindices], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyCenteredRMSProp_T]: + r"""Update '*var' according to the centered RMSProp algorithm. + + The centered RMSProp algorithm uses an estimate of the centered second moment + (i.e., the variance) for normalization, as opposed to regular RMSProp, which + uses the (uncentered) second moment. This often helps with training, but is + slightly more expensive in terms of computation and memory. + + Note that in dense implementation of this algorithm, mg, ms, and mom will + update even if the grad is zero, but in this sparse implementation, mg, ms, + and mom will not update in iterations during which the grad is zero. 
+ + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + mean_grad = decay * mean_grad + (1-decay) * gradient + Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + + $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + $$var <- var - mom$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + mg: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + ms: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + mom: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `var`. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var, ms and mom. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, mg, ms, and mom tensors is + protected by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. 
Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr, + rho=rho, momentum=momentum, + epsilon=epsilon, grad=grad, + indices=indices, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyCenteredRMSProp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyCenteredRMSProp = tf_export("raw_ops.SparseApplyCenteredRMSProp")(_ops.to_raw_op(sparse_apply_centered_rms_prop)) + + +def sparse_apply_centered_rms_prop_eager_fallback(var: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], mg: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], ms: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], mom: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], lr: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], rho: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], momentum: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], epsilon: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], grad: Annotated[Any, TV_SparseApplyCenteredRMSProp_T], indices: Annotated[Any, TV_SparseApplyCenteredRMSProp_Tindices], use_locking: bool, name, ctx) -> Annotated[Any, TV_SparseApplyCenteredRMSProp_T]: + raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. 
Arg 'out' is a ref.") + +TV_SparseApplyFtrl_T = TypeVar("TV_SparseApplyFtrl_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyFtrl_Tindices = TypeVar("TV_SparseApplyFtrl_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_ftrl(var: Annotated[Any, TV_SparseApplyFtrl_T], accum: Annotated[Any, TV_SparseApplyFtrl_T], linear: Annotated[Any, TV_SparseApplyFtrl_T], grad: Annotated[Any, TV_SparseApplyFtrl_T], indices: Annotated[Any, TV_SparseApplyFtrl_Tindices], lr: Annotated[Any, TV_SparseApplyFtrl_T], l1: Annotated[Any, TV_SparseApplyFtrl_T], l2: Annotated[Any, TV_SparseApplyFtrl_T], lr_power: Annotated[Any, TV_SparseApplyFtrl_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None) -> Annotated[Any, TV_SparseApplyFtrl_T]: + r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. + + That is for rows we have grad for, we update var, accum and linear as follows: + $$accum_new = accum + grad * grad$$ + $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}} / lr * var$$ + $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ + $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ + $$accum = accum_{new}$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + linear: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. 
Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + lr_power: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_ftrl op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, + indices=indices, lr=lr, l1=l1, l2=l2, + lr_power=lr_power, use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", + _op._get_attr_bool("multiply_linear_by_lr")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyFtrl", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyFtrl = tf_export("raw_ops.SparseApplyFtrl")(_ops.to_raw_op(sparse_apply_ftrl)) + + +def sparse_apply_ftrl_eager_fallback(var: Annotated[Any, TV_SparseApplyFtrl_T], accum: Annotated[Any, TV_SparseApplyFtrl_T], linear: Annotated[Any, TV_SparseApplyFtrl_T], grad: Annotated[Any, TV_SparseApplyFtrl_T], indices: Annotated[Any, TV_SparseApplyFtrl_Tindices], lr: Annotated[Any, TV_SparseApplyFtrl_T], l1: Annotated[Any, TV_SparseApplyFtrl_T], l2: Annotated[Any, TV_SparseApplyFtrl_T], lr_power: Annotated[Any, TV_SparseApplyFtrl_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx) -> Annotated[Any, TV_SparseApplyFtrl_T]: + raise RuntimeError("sparse_apply_ftrl op does not support eager execution. 
Arg 'out' is a ref.") + +TV_SparseApplyFtrlV2_T = TypeVar("TV_SparseApplyFtrlV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyFtrlV2_Tindices = TypeVar("TV_SparseApplyFtrlV2_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_ftrl_v2(var: Annotated[Any, TV_SparseApplyFtrlV2_T], accum: Annotated[Any, TV_SparseApplyFtrlV2_T], linear: Annotated[Any, TV_SparseApplyFtrlV2_T], grad: Annotated[Any, TV_SparseApplyFtrlV2_T], indices: Annotated[Any, TV_SparseApplyFtrlV2_Tindices], lr: Annotated[Any, TV_SparseApplyFtrlV2_T], l1: Annotated[Any, TV_SparseApplyFtrlV2_T], l2: Annotated[Any, TV_SparseApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_SparseApplyFtrlV2_T], lr_power: Annotated[Any, TV_SparseApplyFtrlV2_T], use_locking:bool=False, multiply_linear_by_lr:bool=False, name=None) -> Annotated[Any, TV_SparseApplyFtrlV2_T]: + r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. + + That is for rows we have grad for, we update var, accum and linear as follows: + grad_with_shrinkage = grad + 2 * l2_shrinkage * var + accum_new = accum + grad * grad + linear += grad_with_shrinkage - + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). 
+ linear: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 shrinkage regularization. Must be a scalar. + l2_shrinkage: A `Tensor`. Must have the same type as `var`. + lr_power: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + multiply_linear_by_lr: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if multiply_linear_by_lr is None: + multiply_linear_by_lr = False + multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, + indices=indices, lr=lr, l1=l1, l2=l2, + l2_shrinkage=l2_shrinkage, lr_power=lr_power, + use_locking=use_locking, + multiply_linear_by_lr=multiply_linear_by_lr, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", + _op._get_attr_bool("multiply_linear_by_lr")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyFtrlV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyFtrlV2 = tf_export("raw_ops.SparseApplyFtrlV2")(_ops.to_raw_op(sparse_apply_ftrl_v2)) + + +def sparse_apply_ftrl_v2_eager_fallback(var: Annotated[Any, TV_SparseApplyFtrlV2_T], accum: Annotated[Any, TV_SparseApplyFtrlV2_T], linear: Annotated[Any, TV_SparseApplyFtrlV2_T], grad: Annotated[Any, TV_SparseApplyFtrlV2_T], indices: Annotated[Any, TV_SparseApplyFtrlV2_Tindices], lr: Annotated[Any, TV_SparseApplyFtrlV2_T], l1: Annotated[Any, TV_SparseApplyFtrlV2_T], l2: Annotated[Any, TV_SparseApplyFtrlV2_T], l2_shrinkage: Annotated[Any, TV_SparseApplyFtrlV2_T], lr_power: Annotated[Any, TV_SparseApplyFtrlV2_T], use_locking: bool, multiply_linear_by_lr: bool, name, ctx) -> Annotated[Any, TV_SparseApplyFtrlV2_T]: + raise RuntimeError("sparse_apply_ftrl_v2 op does not support eager execution. 
Arg 'out' is a ref.") + +TV_SparseApplyMomentum_T = TypeVar("TV_SparseApplyMomentum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyMomentum_Tindices = TypeVar("TV_SparseApplyMomentum_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_momentum(var: Annotated[Any, TV_SparseApplyMomentum_T], accum: Annotated[Any, TV_SparseApplyMomentum_T], lr: Annotated[Any, TV_SparseApplyMomentum_T], grad: Annotated[Any, TV_SparseApplyMomentum_T], indices: Annotated[Any, TV_SparseApplyMomentum_Tindices], momentum: Annotated[Any, TV_SparseApplyMomentum_T], use_locking:bool=False, use_nesterov:bool=False, name=None) -> Annotated[Any, TV_SparseApplyMomentum_T]: + r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme. + + Set use_nesterov = True if you want to use Nesterov momentum. + + That is for rows we have grad for, we update var and accum as follows: + + $$accum = accum * momentum + grad$$ + $$var -= lr * accum$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Learning rate. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + momentum: A `Tensor`. Must have the same type as `var`. + Momentum. 
Must be a scalar. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var and accum tensors will be protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + use_nesterov: An optional `bool`. Defaults to `False`. + If `True`, the tensor passed to compute grad will be + var - lr * momentum * accum, so in the end, the var you get is actually + var - lr * momentum * accum. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + if use_nesterov is None: + use_nesterov = False + use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, + indices=indices, momentum=momentum, + use_locking=use_locking, + use_nesterov=use_nesterov, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking"), "use_nesterov", + _op._get_attr_bool("use_nesterov")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyMomentum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyMomentum = tf_export("raw_ops.SparseApplyMomentum")(_ops.to_raw_op(sparse_apply_momentum)) + + +def sparse_apply_momentum_eager_fallback(var: Annotated[Any, TV_SparseApplyMomentum_T], accum: Annotated[Any, TV_SparseApplyMomentum_T], lr: Annotated[Any, TV_SparseApplyMomentum_T], grad: Annotated[Any, 
TV_SparseApplyMomentum_T], indices: Annotated[Any, TV_SparseApplyMomentum_Tindices], momentum: Annotated[Any, TV_SparseApplyMomentum_T], use_locking: bool, use_nesterov: bool, name, ctx) -> Annotated[Any, TV_SparseApplyMomentum_T]: + raise RuntimeError("sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref.") + +TV_SparseApplyProximalAdagrad_T = TypeVar("TV_SparseApplyProximalAdagrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyProximalAdagrad_Tindices = TypeVar("TV_SparseApplyProximalAdagrad_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_proximal_adagrad(var: Annotated[Any, TV_SparseApplyProximalAdagrad_T], accum: Annotated[Any, TV_SparseApplyProximalAdagrad_T], lr: Annotated[Any, TV_SparseApplyProximalAdagrad_T], l1: Annotated[Any, TV_SparseApplyProximalAdagrad_T], l2: Annotated[Any, TV_SparseApplyProximalAdagrad_T], grad: Annotated[Any, TV_SparseApplyProximalAdagrad_T], indices: Annotated[Any, TV_SparseApplyProximalAdagrad_Tindices], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyProximalAdagrad_T]: + r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + + That is for rows we have grad for, we update var and accum as follows: + $$accum += grad * grad$$ + $$prox_v = var$$ + $$prox_v -= lr * grad * (1 / sqrt(accum))$$ + $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + accum: A mutable `Tensor`. 
Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Learning rate. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If True, updating of the var and accum tensors will be protected by + a lock; otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, + l2=l2, grad=grad, indices=indices, + use_locking=use_locking, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyProximalAdagrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyProximalAdagrad = tf_export("raw_ops.SparseApplyProximalAdagrad")(_ops.to_raw_op(sparse_apply_proximal_adagrad)) + + +def sparse_apply_proximal_adagrad_eager_fallback(var: Annotated[Any, TV_SparseApplyProximalAdagrad_T], accum: Annotated[Any, TV_SparseApplyProximalAdagrad_T], lr: Annotated[Any, TV_SparseApplyProximalAdagrad_T], l1: Annotated[Any, TV_SparseApplyProximalAdagrad_T], l2: Annotated[Any, TV_SparseApplyProximalAdagrad_T], grad: Annotated[Any, TV_SparseApplyProximalAdagrad_T], indices: Annotated[Any, TV_SparseApplyProximalAdagrad_Tindices], use_locking: bool, name, ctx) -> Annotated[Any, TV_SparseApplyProximalAdagrad_T]: + raise RuntimeError("sparse_apply_proximal_adagrad op does not support eager execution. 
Arg 'out' is a ref.") + +TV_SparseApplyProximalGradientDescent_T = TypeVar("TV_SparseApplyProximalGradientDescent_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyProximalGradientDescent_Tindices = TypeVar("TV_SparseApplyProximalGradientDescent_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_proximal_gradient_descent(var: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], alpha: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], l1: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], l2: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], grad: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], indices: Annotated[Any, TV_SparseApplyProximalGradientDescent_Tindices], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyProximalGradientDescent_T]: + r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate. + + That is for rows we have grad for, we update var as follows: + $$prox_v = var - alpha * grad$$ + $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ + + Args: + var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + alpha: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + l1: A `Tensor`. Must have the same type as `var`. + L1 regularization. Must be a scalar. + l2: A `Tensor`. Must have the same type as `var`. + L2 regularization. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. 
Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var and accum. + use_locking: An optional `bool`. Defaults to `False`. + If True, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. + if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, + l2=l2, grad=grad, + indices=indices, + use_locking=use_locking, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyProximalGradientDescent", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyProximalGradientDescent = tf_export("raw_ops.SparseApplyProximalGradientDescent")(_ops.to_raw_op(sparse_apply_proximal_gradient_descent)) + + +def sparse_apply_proximal_gradient_descent_eager_fallback(var: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], alpha: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], l1: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], l2: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], grad: Annotated[Any, TV_SparseApplyProximalGradientDescent_T], indices: Annotated[Any, TV_SparseApplyProximalGradientDescent_Tindices], use_locking: bool, name, ctx) -> 
Annotated[Any, TV_SparseApplyProximalGradientDescent_T]: + raise RuntimeError("sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") + +TV_SparseApplyRMSProp_T = TypeVar("TV_SparseApplyRMSProp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseApplyRMSProp_Tindices = TypeVar("TV_SparseApplyRMSProp_Tindices", _atypes.Int32, _atypes.Int64) + +def sparse_apply_rms_prop(var: Annotated[Any, TV_SparseApplyRMSProp_T], ms: Annotated[Any, TV_SparseApplyRMSProp_T], mom: Annotated[Any, TV_SparseApplyRMSProp_T], lr: Annotated[Any, TV_SparseApplyRMSProp_T], rho: Annotated[Any, TV_SparseApplyRMSProp_T], momentum: Annotated[Any, TV_SparseApplyRMSProp_T], epsilon: Annotated[Any, TV_SparseApplyRMSProp_T], grad: Annotated[Any, TV_SparseApplyRMSProp_T], indices: Annotated[Any, TV_SparseApplyRMSProp_Tindices], use_locking:bool=False, name=None) -> Annotated[Any, TV_SparseApplyRMSProp_T]: + r"""Update '*var' according to the RMSProp algorithm. + + Note that in dense implementation of this algorithm, ms and mom will + update even if the grad is zero, but in this sparse implementation, ms + and mom will not update in iterations during which the grad is zero. + + mean_square = decay * mean_square + (1-decay) * gradient ** 2 + Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + + $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + $$var <- var - mom$$ + + Args: + var: A mutable `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + Should be from a Variable(). + ms: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + mom: A mutable `Tensor`. Must have the same type as `var`. + Should be from a Variable(). + lr: A `Tensor`. Must have the same type as `var`. + Scaling factor. Must be a scalar. + rho: A `Tensor`. Must have the same type as `var`. + Decay rate. Must be a scalar. + momentum: A `Tensor`. Must have the same type as `var`. + epsilon: A `Tensor`. Must have the same type as `var`. + Ridge term. Must be a scalar. + grad: A `Tensor`. Must have the same type as `var`. The gradient. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into the first dimension of var, ms and mom. + use_locking: An optional `bool`. Defaults to `False`. + If `True`, updating of the var, ms, and mom tensors is protected + by a lock; otherwise the behavior is undefined, but may exhibit less + contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `var`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + raise RuntimeError("sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") + # Add nodes to the TensorFlow graph. 
+ if use_locking is None: + use_locking = False + use_locking = _execute.make_bool(use_locking, "use_locking") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, + momentum=momentum, epsilon=epsilon, grad=grad, + indices=indices, use_locking=use_locking, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "use_locking", + _op._get_attr_bool("use_locking")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseApplyRMSProp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseApplyRMSProp = tf_export("raw_ops.SparseApplyRMSProp")(_ops.to_raw_op(sparse_apply_rms_prop)) + + +def sparse_apply_rms_prop_eager_fallback(var: Annotated[Any, TV_SparseApplyRMSProp_T], ms: Annotated[Any, TV_SparseApplyRMSProp_T], mom: Annotated[Any, TV_SparseApplyRMSProp_T], lr: Annotated[Any, TV_SparseApplyRMSProp_T], rho: Annotated[Any, TV_SparseApplyRMSProp_T], momentum: Annotated[Any, TV_SparseApplyRMSProp_T], epsilon: Annotated[Any, TV_SparseApplyRMSProp_T], grad: Annotated[Any, TV_SparseApplyRMSProp_T], indices: Annotated[Any, TV_SparseApplyRMSProp_Tindices], use_locking: bool, name, ctx) -> Annotated[Any, TV_SparseApplyRMSProp_T]: + raise RuntimeError("sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_uniform_quant_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_uniform_quant_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef163f90c504b7367f600470c11433245b6e878 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_uniform_quant_ops.py @@ -0,0 +1,1767 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_UniformDequantize_Tin = TypeVar("TV_UniformDequantize_Tin", _atypes.QInt32, _atypes.QInt8) +TV_UniformDequantize_Tout = TypeVar("TV_UniformDequantize_Tout", bound=_atypes.Float32) + +def uniform_dequantize(input: Annotated[Any, TV_UniformDequantize_Tin], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformDequantize_Tout, quantization_min_val: int, quantization_max_val: int, quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformDequantize_Tout]: + r"""Perform dequantization on the quantized Tensor `input`. + + Given quantized `input` which was quantized using `scales` and `zero_points`, performs dequantization using the formula: + dequantized_data = (quantized_data - zero_point) * scale. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `qint32`. + Must be a Tensor of Tin. + scales: A `Tensor` of type `float32`. + The float value(s) used as scale(s) when quantizing original data that input represents. 
+ Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point(s) when quantizing original data that input represents. + Same shape condition as scales. + Tout: A `tf.DType` from: `tf.float32`. + The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 + quantization_min_val: An `int`. + The quantization min value that was used when input was quantized. + The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + quantization_max_val: An `int`. + The quantization max value that was used when input was quantized. + The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + `(Tout max)` for both narrow range and not narrow range. + For example, if Tin is qint8, this is set to 127. + quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformDequantize", name, input, scales, zero_points, "Tout", + Tout, "quantization_axis", quantization_axis, "quantization_min_val", + quantization_min_val, "quantization_max_val", quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_dequantize_eager_fallback( + input, scales, zero_points, Tout=Tout, + quantization_axis=quantization_axis, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformDequantize", input=input, scales=scales, + zero_points=zero_points, Tout=Tout, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, + quantization_axis=quantization_axis, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op._get_attr_type("Tin"), "Tout", + _op._get_attr_type("Tout"), "quantization_axis", + _op._get_attr_int("quantization_axis"), "quantization_min_val", + _op._get_attr_int("quantization_min_val"), + "quantization_max_val", + _op._get_attr_int("quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformDequantize", _inputs_flat, _attrs, _result) + _result, = _result 
+ return _result + +UniformDequantize = tf_export("raw_ops.UniformDequantize")(_ops.to_raw_op(uniform_dequantize)) + + +def uniform_dequantize_eager_fallback(input: Annotated[Any, TV_UniformDequantize_Tin], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformDequantize_Tout, quantization_min_val: int, quantization_max_val: int, quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformDequantize_Tout]: + Tout = _execute.make_type(Tout, "Tout") + quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _attr_Tin, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.qint32, ]) + scales = _ops.convert_to_tensor(scales, _dtypes.float32) + zero_points = _ops.convert_to_tensor(zero_points, _dtypes.int32) + _inputs_flat = [input, scales, zero_points] + _attrs = ("Tin", _attr_Tin, "Tout", Tout, "quantization_axis", + quantization_axis, "quantization_min_val", quantization_min_val, + "quantization_max_val", quantization_max_val) + _result = _execute.execute(b"UniformDequantize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformDequantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantize_Tin = TypeVar("TV_UniformQuantize_Tin", bound=_atypes.Float32) +TV_UniformQuantize_Tout = TypeVar("TV_UniformQuantize_Tout", _atypes.QInt32, _atypes.QInt8) + +def uniform_quantize(input: Annotated[Any, TV_UniformQuantize_Tin], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantize_Tout, quantization_min_val: int, quantization_max_val: int, quantization_axis:int=-1, 
name=None) -> Annotated[Any, TV_UniformQuantize_Tout]: + r"""Perform quantization on Tensor `input`. + + Given `input`, `scales` and `zero_points`, performs quantization using the formula: + quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point + + Args: + input: A `Tensor`. Must be one of the following types: `float32`. + Must be a Tensor of Tin. + scales: A `Tensor` of type `float32`. + The float value(s) to use as scale(s) to quantize `input`. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + zero_points: A `Tensor` of type `int32`. + The int32 value(s) to use as zero_point(s) to quantize `input`. + Same shape condition as scales. + Tout: A `tf.DType` from: `tf.qint8, tf.qint32`. + The type of output Tensor. A tf.DType from: tf.float32 + quantization_min_val: An `int`. + The quantization min value to quantize `input`. + The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + quantization_max_val: An `int`. + The quantization max value to quantize `input`. + The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + `(Tout max)` for both narrow range and not narrow range. + For example, if Tin is qint8, this is set to 127. + quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantize", name, input, scales, zero_points, "Tout", + Tout, "quantization_axis", quantization_axis, "quantization_min_val", + quantization_min_val, "quantization_max_val", quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantize_eager_fallback( + input, scales, zero_points, Tout=Tout, + quantization_axis=quantization_axis, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantize", input=input, scales=scales, + zero_points=zero_points, Tout=Tout, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, + quantization_axis=quantization_axis, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op._get_attr_type("Tin"), "Tout", + _op._get_attr_type("Tout"), "quantization_axis", + _op._get_attr_int("quantization_axis"), "quantization_min_val", + _op._get_attr_int("quantization_min_val"), + "quantization_max_val", + _op._get_attr_int("quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantize", _inputs_flat, _attrs, _result) + _result, = _result + 
return _result + +UniformQuantize = tf_export("raw_ops.UniformQuantize")(_ops.to_raw_op(uniform_quantize)) + + +def uniform_quantize_eager_fallback(input: Annotated[Any, TV_UniformQuantize_Tin], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantize_Tout, quantization_min_val: int, quantization_max_val: int, quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantize_Tout]: + Tout = _execute.make_type(Tout, "Tout") + quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _attr_Tin, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, ]) + scales = _ops.convert_to_tensor(scales, _dtypes.float32) + zero_points = _ops.convert_to_tensor(zero_points, _dtypes.int32) + _inputs_flat = [input, scales, zero_points] + _attrs = ("Tin", _attr_Tin, "Tout", Tout, "quantization_axis", + quantization_axis, "quantization_min_val", quantization_min_val, + "quantization_max_val", quantization_max_val) + _result = _execute.execute(b"UniformQuantize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedAdd_T = TypeVar("TV_UniformQuantizedAdd_T", bound=_atypes.QInt32) + +def uniform_quantized_add(lhs: Annotated[Any, TV_UniformQuantizedAdd_T], rhs: Annotated[Any, TV_UniformQuantizedAdd_T], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, 
_atypes.Int32], lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, lhs_quantization_axis:int=-1, rhs_quantization_axis:int=-1, output_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedAdd_T]: + r"""Perform quantized add of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`. + + Given quantized `lhs` and quantized `rhs`, performs quantized add on `lhs` and `rhs` to make quantized `output`. + + `UniformQuantizedAdd` follows Numpy broadcasting rules. + The two input array shapes are compared element-wise. + Starting with the trailing dimensions, the two dimensions either have to be equal or one of them needs to be 1. + + `lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: + ``` + quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val) + ``` + `output` is also quantized, using the same formula. + + If `lhs` and `output` is both per-axis quantized, the quantization axis must match. + Also, if `rhs` and `output` is both per-axis quantized, the quantization axis must match. + *Match* means the axis must match when adding, regarding the broadcasting. + i.e. For both operands `lhs` and `rhs`, + if `operand.quantization_axis` >= 0 and `output.quantization_axis` >= 0, + `operand.dims` - `operand.quantization_axis` must be equal to `output.dims` - `output.quantization_axis`. + + Args: + lhs: A `Tensor`. Must be one of the following types: `qint32`. + Must be a quantized tensor. + rhs: A `Tensor`. Must have the same type as `lhs`. + Must be a quantized tensor. + lhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale factors when quantizing the original data that `lhs` represents. + lhs_zero_points: A `Tensor` of type `int32`. 
+ The int32 value(s) used as zero points when quantizing original data that `lhs` represents. + Must have same shape with `lhs_scales`. + rhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale factors when quantizing the original data that `rhs` represents. + rhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero points when quantizing original data that `rhs` represents. + Must have same shape with `rhs_scales`. + output_scales: A `Tensor` of type `float32`. + The float value(s) to use as scale factors when quantizing original data that `output` represents. + output_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero points when quantizing original data that output represents. + Must have same shape with `output_scales`. + lhs_quantization_min_val: An `int`. + The min value of the quantized data stored in `lhs`. + For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + lhs_quantization_max_val: An `int`. + The max value of the quantized data stored in `lhs`. + For example, if `Tin` is `qint8`, this must be set to 127. + rhs_quantization_min_val: An `int`. + The min value of the quantized data stored in `rhs`. + For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + rhs_quantization_max_val: An `int`. + The max value of the quantized data stored in `rhs`. + For example, if `Tin` is `qint8`, this must be set to 127. + output_quantization_min_val: An `int`. + The min value of the quantized data stored in `output`. + For example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + output_quantization_max_val: An `int`. + The max value of the quantized data stored in `output`. + For example, if `Tout` is `qint8`, this must be set to 127. + lhs_quantization_axis: An optional `int`. Defaults to `-1`. 
+ Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `lhs`, only per-tensor quantization is supported. + Thus, this must be set to -1. + Other values will raise error at OpKernel construction. + rhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `rhs`, only per-tensor quantization + or per-channel quantization along `kernel_output_feature_dimension` is supported. + Thus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. + Other values will raise error at OpKernel construction. + output_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported. + Thus, this must be set to -1 or `dimension_numbers.output_feature_dimension`. + Other values will raise error at OpKernel construction. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `lhs`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedAdd", name, lhs, rhs, lhs_scales, + lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, + output_zero_points, "lhs_quantization_axis", lhs_quantization_axis, + "lhs_quantization_min_val", lhs_quantization_min_val, + "lhs_quantization_max_val", lhs_quantization_max_val, + "rhs_quantization_axis", rhs_quantization_axis, + "rhs_quantization_min_val", rhs_quantization_min_val, + "rhs_quantization_max_val", rhs_quantization_max_val, + "output_quantization_axis", output_quantization_axis, + "output_quantization_min_val", output_quantization_min_val, + "output_quantization_max_val", output_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_add_eager_fallback( + lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, + output_scales, output_zero_points, + lhs_quantization_axis=lhs_quantization_axis, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_axis=rhs_quantization_axis, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_axis=output_quantization_axis, + output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedAdd", lhs=lhs, rhs=rhs, lhs_scales=lhs_scales, + lhs_zero_points=lhs_zero_points, + rhs_scales=rhs_scales, + rhs_zero_points=rhs_zero_points, + output_scales=output_scales, + output_zero_points=output_zero_points, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, + lhs_quantization_axis=lhs_quantization_axis, + rhs_quantization_axis=rhs_quantization_axis, + output_quantization_axis=output_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("lhs_quantization_axis", + 
_op._get_attr_int("lhs_quantization_axis"), + "lhs_quantization_min_val", + _op._get_attr_int("lhs_quantization_min_val"), + "lhs_quantization_max_val", + _op._get_attr_int("lhs_quantization_max_val"), + "rhs_quantization_axis", + _op._get_attr_int("rhs_quantization_axis"), + "rhs_quantization_min_val", + _op._get_attr_int("rhs_quantization_min_val"), + "rhs_quantization_max_val", + _op._get_attr_int("rhs_quantization_max_val"), + "output_quantization_axis", + _op._get_attr_int("output_quantization_axis"), + "output_quantization_min_val", + _op._get_attr_int("output_quantization_min_val"), + "output_quantization_max_val", + _op._get_attr_int("output_quantization_max_val"), "T", + _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedAdd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedAdd = tf_export("raw_ops.UniformQuantizedAdd")(_ops.to_raw_op(uniform_quantized_add)) + + +def uniform_quantized_add_eager_fallback(lhs: Annotated[Any, TV_UniformQuantizedAdd_T], rhs: Annotated[Any, TV_UniformQuantizedAdd_T], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, lhs_quantization_axis: int, rhs_quantization_axis: int, output_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedAdd_T]: + lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = 
_execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.qint32, ]) + (lhs, rhs) = _inputs_T + lhs_scales = _ops.convert_to_tensor(lhs_scales, _dtypes.float32) + lhs_zero_points = _ops.convert_to_tensor(lhs_zero_points, _dtypes.int32) + rhs_scales = _ops.convert_to_tensor(rhs_scales, _dtypes.float32) + rhs_zero_points = _ops.convert_to_tensor(rhs_zero_points, _dtypes.int32) + output_scales = _ops.convert_to_tensor(output_scales, _dtypes.float32) + output_zero_points = _ops.convert_to_tensor(output_zero_points, _dtypes.int32) + _inputs_flat = [lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points] + _attrs = ("lhs_quantization_axis", lhs_quantization_axis, + "lhs_quantization_min_val", lhs_quantization_min_val, + "lhs_quantization_max_val", lhs_quantization_max_val, + "rhs_quantization_axis", rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val, "output_quantization_axis", + output_quantization_axis, "output_quantization_min_val", + 
output_quantization_min_val, "output_quantization_max_val", + output_quantization_max_val, "T", _attr_T) + _result = _execute.execute(b"UniformQuantizedAdd", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedAdd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedClipByValue_T = TypeVar("TV_UniformQuantizedClipByValue_T", bound=_atypes.QInt32) + +def uniform_quantized_clip_by_value(operand: Annotated[Any, TV_UniformQuantizedClipByValue_T], min: Annotated[Any, TV_UniformQuantizedClipByValue_T], max: Annotated[Any, TV_UniformQuantizedClipByValue_T], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], quantization_min_val: int, quantization_max_val: int, quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedClipByValue_T]: + r"""Perform clip by value on the quantized Tensor `operand`. + + Given quantized `operand` which was quantized using `scales` and `zero_points`, performs clip by value using `min` and `max` values. + If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. + Otherwise (per-channel quantized), the clipping is also done per-channel. + + Args: + operand: A `Tensor`. Must be one of the following types: `qint32`. + Must be a Tensor of T. + min: A `Tensor`. Must have the same type as `operand`. + The min value(s) to clip operand. Must be a Tensor of T. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). + max: A `Tensor`. Must have the same type as `operand`. + The min value(s) to clip operand. Must be a Tensor of T. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). 
+ scales: A `Tensor` of type `float32`. + The float value(s) used as scale(s) when quantizing `operand`, `min` and `max`. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). + zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point(s) when quantizing `operand`, `min` and `max`. + Same shape condition as scales. + quantization_min_val: An `int`. + The quantization min value that was used when operand was quantized. + quantization_max_val: An `int`. + The quantization max value that was used when operand was quantized. + quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, operand.dims()). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `operand`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedClipByValue", name, operand, min, max, scales, + zero_points, "quantization_axis", quantization_axis, + "quantization_min_val", quantization_min_val, "quantization_max_val", + quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_clip_by_value_eager_fallback( + operand, min, max, scales, zero_points, + quantization_axis=quantization_axis, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedClipByValue", operand=operand, min=min, max=max, + scales=scales, zero_points=zero_points, + quantization_min_val=quantization_min_val, + quantization_max_val=quantization_max_val, + quantization_axis=quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "quantization_axis", + _op._get_attr_int("quantization_axis"), "quantization_min_val", + _op._get_attr_int("quantization_min_val"), + "quantization_max_val", + _op._get_attr_int("quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedClipByValue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedClipByValue = tf_export("raw_ops.UniformQuantizedClipByValue")(_ops.to_raw_op(uniform_quantized_clip_by_value)) + + +def uniform_quantized_clip_by_value_eager_fallback(operand: Annotated[Any, TV_UniformQuantizedClipByValue_T], min: Annotated[Any, TV_UniformQuantizedClipByValue_T], max: Annotated[Any, TV_UniformQuantizedClipByValue_T], scales: Annotated[Any, _atypes.Float32], zero_points: Annotated[Any, _atypes.Int32], quantization_min_val: int, quantization_max_val: int, quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedClipByValue_T]: + quantization_min_val = _execute.make_int(quantization_min_val, "quantization_min_val") + quantization_max_val = _execute.make_int(quantization_max_val, "quantization_max_val") + if quantization_axis is None: + quantization_axis = -1 + quantization_axis = _execute.make_int(quantization_axis, "quantization_axis") + _attr_T, 
_inputs_T = _execute.args_to_matching_eager([operand, min, max], ctx, [_dtypes.qint32, ]) + (operand, min, max) = _inputs_T + scales = _ops.convert_to_tensor(scales, _dtypes.float32) + zero_points = _ops.convert_to_tensor(zero_points, _dtypes.int32) + _inputs_flat = [operand, min, max, scales, zero_points] + _attrs = ("T", _attr_T, "quantization_axis", quantization_axis, + "quantization_min_val", quantization_min_val, "quantization_max_val", + quantization_max_val) + _result = _execute.execute(b"UniformQuantizedClipByValue", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedClipByValue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedConvolution_Tin = TypeVar("TV_UniformQuantizedConvolution_Tin", bound=_atypes.QInt8) +TV_UniformQuantizedConvolution_Tout = TypeVar("TV_UniformQuantizedConvolution_Tout", bound=_atypes.QInt32) + +def uniform_quantized_convolution(lhs: Annotated[Any, TV_UniformQuantizedConvolution_Tin], rhs: Annotated[Any, TV_UniformQuantizedConvolution_Tin], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedConvolution_Tout, padding: str, lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, window_strides=[], explicit_padding=[], lhs_dilation=[], rhs_dilation=[], batch_group_count:int=1, feature_group_count:int=1, dimension_numbers:str="", lhs_quantization_axis:int=-1, rhs_quantization_axis:int=-1, output_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedConvolution_Tout]: + r"""Perform quantized 
convolution of quantized Tensor `lhs` and quantized Tensor `rhs`. to make quantized `output`. + + Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs` and `rhs` to make quantized `output`. + + `lhs` and `rhs` must be Tensors of same rank, and meet following shape conditions. + - `lhs_feature` % `feature_group_count` == 0 + - `lhs_feature` % `rhs_input_feature` == 0 + - `lhs_feature` / `feature_group_count` == `rhs_input_feature` + - `rhs_output_feature` % `feature_group_count` == 0 + - `lhs_batch` % `batch_group_count` == 0 + - `rhs_output_feature` % `batch_group_count` == 0 + + `lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: + ``` + quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val) + ``` + `output` is also quantized, using the same formula. + If `rhs` is per-tensor quantized, `output` must be also per-tensor quantized. + + Args: + lhs: A `Tensor`. Must be one of the following types: `qint8`. + Must be a quantized tensor, rank >= 3. + rhs: A `Tensor`. Must have the same type as `lhs`. + Must be a quantized tensor, same rank as `lhs`. + lhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale factors when quantizing the original data that `lhs` represents. + Must be a scalar `Tensor` (`lhs` supports only per-tensor quantization). + lhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero points when quantizing original data that `lhs` represents. + Same shape condition as `lhs_scales`. + rhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale factors when quantizing the original data that `rhs` represents. + Must be a scalar `Tensor` for per-tensor quantization, + or 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization. + rhs_zero_points: A `Tensor` of type `int32`. 
+ The int32 value(s) used as zero points when quantizing original data that `rhs` represents. + Same shape condition as `rhs_scales`. + output_scales: A `Tensor` of type `float32`. + The float value(s) to use as scale factors when quantizing original data that `output` represents. + Must be a scalar `Tensor` for per-tensor quantization, + or 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)` + - which is equal to `output.dim_size(output_feature_dimension)`, + for per-channel quantization. + If `rhs` is per-tensor quantized, output must be also per-tensor quantized. + This means that if `rhs_scales` and `rhs_zero_points` are scalar `Tensor`s, `output_scales` and `output_zero_points` must be scalar `Tensor`s as well. + output_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero points when quantizing original data that output represents. + Same shape condition as `output_scales`. + Tout: A `tf.DType` from: `tf.qint32`. The type of `output` `Tensor`. + padding: A `string`. + string from: `"SAME"`, `"VALID"`, or `"EXPLICIT"`, indicating the type of padding algorithm to use. + lhs_quantization_min_val: An `int`. + The min value of the quantized data stored in `lhs`. + For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + lhs_quantization_max_val: An `int`. + The max value of the quantized data stored in `lhs`. + For example, if `Tin` is `qint8`, this must be set to 127. + rhs_quantization_min_val: An `int`. + The min value of the quantized data stored in `rhs`. + For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + rhs_quantization_max_val: An `int`. + The max value of the quantized data stored in `rhs`. + For example, if `Tin` is `qint8`, this must be set to 127. + output_quantization_min_val: An `int`. + The min value of the quantized data stored in `output`. 
+ For example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not. + output_quantization_max_val: An `int`. + The max value of the quantized data stored in `output`. + For example, if `Tout` is `qint8`, this must be set to 127. + window_strides: An optional list of `ints`. Defaults to `[]`. + The stride of the sliding window for each spatial dimension of `lhs`. + Must be an empty list (default) or a list of size (number of spatial dimensions). + If an empty list is provided, the stride for each spatial dimension is set to 1. + explicit_padding: An optional list of `ints`. Defaults to `[]`. + If `padding` is `"EXPLICIT"`, must be set as a list indicating + the explicit paddings at the start and end of each `lhs` spatial dimension. + Otherwise, this must be empty. + + (If used,) Must be a list of size `2 * (number of lhs spatial dimensions)`, + where `(explicit_padding[2 * i], explicit_padding[2 * i + 1])` indicates + `(start_padding, end_padding)` of `spatial_dimensions[i]`. + lhs_dilation: An optional list of `ints`. Defaults to `[]`. + The dilation factor to apply in each spatial dimension of `lhs`. + Must be an empty list (default) or a list of size (number of `lhs` spatial dimensions). + If empty list, the dilation for each `lhs` spatial dimension is set to 1. + rhs_dilation: An optional list of `ints`. Defaults to `[]`. + The dilation factor to apply in each spatial dimension of `rhs`. + Must be an empty list (default) or a list of size (number of `rhs` spatial dimensions). + If empty list, the dilation for each `rhs` spatial dimension is set to 1. + batch_group_count: An optional `int`. Defaults to `1`. + The number of batch groups. Used for grouped filters. + Must be a divisor of `output_feature`. + feature_group_count: An optional `int`. Defaults to `1`. + The number of feature groups. Used for grouped convolutions. + Must be a divisor of both `lhs_feature` and `output_feature`. + dimension_numbers: An optional `string`. 
Defaults to `""`. + Structure of dimension information for the convolution op. + Must be an empty string (default) or a serialized string of `tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr` proto. + If empty string, the default is `("NCHW", "OIHW", "NCHW")` (for a 2D convolution). + lhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `lhs`, only per-tensor quantization is supported. + Thus, this must be set to -1. + Other values will raise error at OpKernel construction. + rhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `rhs`, only per-tensor quantization + or per-channel quantization along `kernel_output_feature_dimension` is supported. + Thus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. + Other values will raise error at OpKernel construction. + output_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported. + Thus, this must be set to -1 or `dimension_numbers.output_feature_dimension`. + Other values will raise error at OpKernel construction. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedConvolution", name, lhs, rhs, lhs_scales, + lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, + output_zero_points, "Tout", Tout, "window_strides", window_strides, + "padding", padding, "explicit_padding", explicit_padding, + "lhs_dilation", lhs_dilation, "rhs_dilation", rhs_dilation, + "batch_group_count", batch_group_count, "feature_group_count", + feature_group_count, "dimension_numbers", dimension_numbers, + "lhs_quantization_axis", lhs_quantization_axis, + "lhs_quantization_min_val", lhs_quantization_min_val, + "lhs_quantization_max_val", lhs_quantization_max_val, + "rhs_quantization_axis", rhs_quantization_axis, + "rhs_quantization_min_val", rhs_quantization_min_val, + "rhs_quantization_max_val", rhs_quantization_max_val, + "output_quantization_axis", output_quantization_axis, + "output_quantization_min_val", output_quantization_min_val, + "output_quantization_max_val", output_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_convolution_eager_fallback( + lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, + output_scales, output_zero_points, Tout=Tout, + window_strides=window_strides, padding=padding, + explicit_padding=explicit_padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, batch_group_count=batch_group_count, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + lhs_quantization_axis=lhs_quantization_axis, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_axis=rhs_quantization_axis, + rhs_quantization_min_val=rhs_quantization_min_val, + 
rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_axis=output_quantization_axis, + output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + padding = _execute.make_str(padding, "padding") + lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if window_strides is None: + window_strides = [] + if not isinstance(window_strides, (list, tuple)): + raise TypeError( + "Expected list for 'window_strides' argument to " + "'uniform_quantized_convolution' Op, not %r." % window_strides) + window_strides = [_execute.make_int(_i, "window_strides") for _i in window_strides] + if explicit_padding is None: + explicit_padding = [] + if not isinstance(explicit_padding, (list, tuple)): + raise TypeError( + "Expected list for 'explicit_padding' argument to " + "'uniform_quantized_convolution' Op, not %r." % explicit_padding) + explicit_padding = [_execute.make_int(_i, "explicit_padding") for _i in explicit_padding] + if lhs_dilation is None: + lhs_dilation = [] + if not isinstance(lhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'lhs_dilation' argument to " + "'uniform_quantized_convolution' Op, not %r." 
% lhs_dilation) + lhs_dilation = [_execute.make_int(_i, "lhs_dilation") for _i in lhs_dilation] + if rhs_dilation is None: + rhs_dilation = [] + if not isinstance(rhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'rhs_dilation' argument to " + "'uniform_quantized_convolution' Op, not %r." % rhs_dilation) + rhs_dilation = [_execute.make_int(_i, "rhs_dilation") for _i in rhs_dilation] + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + if feature_group_count is None: + feature_group_count = 1 + feature_group_count = _execute.make_int(feature_group_count, "feature_group_count") + if dimension_numbers is None: + dimension_numbers = "" + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedConvolution", lhs=lhs, rhs=rhs, + lhs_scales=lhs_scales, + lhs_zero_points=lhs_zero_points, + rhs_scales=rhs_scales, + rhs_zero_points=rhs_zero_points, + output_scales=output_scales, + output_zero_points=output_zero_points, + Tout=Tout, padding=padding, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, + 
window_strides=window_strides, + explicit_padding=explicit_padding, + lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + batch_group_count=batch_group_count, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + lhs_quantization_axis=lhs_quantization_axis, + rhs_quantization_axis=rhs_quantization_axis, + output_quantization_axis=output_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op._get_attr_type("Tin"), "Tout", + _op._get_attr_type("Tout"), "window_strides", + _op.get_attr("window_strides"), "padding", + _op.get_attr("padding"), "explicit_padding", + _op.get_attr("explicit_padding"), "lhs_dilation", + _op.get_attr("lhs_dilation"), "rhs_dilation", + _op.get_attr("rhs_dilation"), "batch_group_count", + _op._get_attr_int("batch_group_count"), "feature_group_count", + _op._get_attr_int("feature_group_count"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "lhs_quantization_axis", + _op._get_attr_int("lhs_quantization_axis"), + "lhs_quantization_min_val", + _op._get_attr_int("lhs_quantization_min_val"), + "lhs_quantization_max_val", + _op._get_attr_int("lhs_quantization_max_val"), + "rhs_quantization_axis", + _op._get_attr_int("rhs_quantization_axis"), + "rhs_quantization_min_val", + _op._get_attr_int("rhs_quantization_min_val"), + "rhs_quantization_max_val", + _op._get_attr_int("rhs_quantization_max_val"), + "output_quantization_axis", + _op._get_attr_int("output_quantization_axis"), + "output_quantization_min_val", + _op._get_attr_int("output_quantization_min_val"), + "output_quantization_max_val", + _op._get_attr_int("output_quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedConvolution", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedConvolution = tf_export("raw_ops.UniformQuantizedConvolution")(_ops.to_raw_op(uniform_quantized_convolution)) + + +def 
uniform_quantized_convolution_eager_fallback(lhs: Annotated[Any, TV_UniformQuantizedConvolution_Tin], rhs: Annotated[Any, TV_UniformQuantizedConvolution_Tin], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedConvolution_Tout, padding: str, lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, window_strides, explicit_padding, lhs_dilation, rhs_dilation, batch_group_count: int, feature_group_count: int, dimension_numbers: str, lhs_quantization_axis: int, rhs_quantization_axis: int, output_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedConvolution_Tout]: + Tout = _execute.make_type(Tout, "Tout") + padding = _execute.make_str(padding, "padding") + lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if window_strides is None: + window_strides = [] + if not isinstance(window_strides, (list, tuple)): + raise TypeError( + "Expected list for 'window_strides' argument to " + "'uniform_quantized_convolution' Op, not %r." 
% window_strides) + window_strides = [_execute.make_int(_i, "window_strides") for _i in window_strides] + if explicit_padding is None: + explicit_padding = [] + if not isinstance(explicit_padding, (list, tuple)): + raise TypeError( + "Expected list for 'explicit_padding' argument to " + "'uniform_quantized_convolution' Op, not %r." % explicit_padding) + explicit_padding = [_execute.make_int(_i, "explicit_padding") for _i in explicit_padding] + if lhs_dilation is None: + lhs_dilation = [] + if not isinstance(lhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'lhs_dilation' argument to " + "'uniform_quantized_convolution' Op, not %r." % lhs_dilation) + lhs_dilation = [_execute.make_int(_i, "lhs_dilation") for _i in lhs_dilation] + if rhs_dilation is None: + rhs_dilation = [] + if not isinstance(rhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'rhs_dilation' argument to " + "'uniform_quantized_convolution' Op, not %r." % rhs_dilation) + rhs_dilation = [_execute.make_int(_i, "rhs_dilation") for _i in rhs_dilation] + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + if feature_group_count is None: + feature_group_count = 1 + feature_group_count = _execute.make_int(feature_group_count, "feature_group_count") + if dimension_numbers is None: + dimension_numbers = "" + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _attr_Tin, _inputs_Tin = 
_execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.qint8, ]) + (lhs, rhs) = _inputs_Tin + lhs_scales = _ops.convert_to_tensor(lhs_scales, _dtypes.float32) + lhs_zero_points = _ops.convert_to_tensor(lhs_zero_points, _dtypes.int32) + rhs_scales = _ops.convert_to_tensor(rhs_scales, _dtypes.float32) + rhs_zero_points = _ops.convert_to_tensor(rhs_zero_points, _dtypes.int32) + output_scales = _ops.convert_to_tensor(output_scales, _dtypes.float32) + output_zero_points = _ops.convert_to_tensor(output_zero_points, _dtypes.int32) + _inputs_flat = [lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points] + _attrs = ("Tin", _attr_Tin, "Tout", Tout, "window_strides", window_strides, + "padding", padding, "explicit_padding", explicit_padding, "lhs_dilation", + lhs_dilation, "rhs_dilation", rhs_dilation, "batch_group_count", + batch_group_count, "feature_group_count", feature_group_count, + "dimension_numbers", dimension_numbers, "lhs_quantization_axis", + lhs_quantization_axis, "lhs_quantization_min_val", lhs_quantization_min_val, + "lhs_quantization_max_val", lhs_quantization_max_val, + "rhs_quantization_axis", rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val, "output_quantization_axis", + output_quantization_axis, "output_quantization_min_val", + output_quantization_min_val, "output_quantization_max_val", + output_quantization_max_val) + _result = _execute.execute(b"UniformQuantizedConvolution", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedConvolution", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedConvolutionHybrid_Tlhs = TypeVar("TV_UniformQuantizedConvolutionHybrid_Tlhs", bound=_atypes.Float32) +TV_UniformQuantizedConvolutionHybrid_Trhs = TypeVar("TV_UniformQuantizedConvolutionHybrid_Trhs", 
bound=_atypes.QInt8) +TV_UniformQuantizedConvolutionHybrid_Tout = TypeVar("TV_UniformQuantizedConvolutionHybrid_Tout", bound=_atypes.Float32) + +def uniform_quantized_convolution_hybrid(lhs: Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Tlhs], rhs: Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Trhs], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedConvolutionHybrid_Tout, padding: str, rhs_quantization_min_val: int, rhs_quantization_max_val: int, window_strides=[], explicit_padding=[], lhs_dilation=[], rhs_dilation=[], batch_group_count:int=1, feature_group_count:int=1, dimension_numbers:str="", rhs_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Tout]: + r"""Perform hybrid quantized convolution of float Tensor `lhs` and quantized Tensor `rhs`. + + Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`, + and then performs quantized convolution on quantized `lhs` and `rhs`. + + The internal quantization on `lhs` is a quantization to `Trhs`, dynamic range, + per-batch (per-axis along axis `dimension_numbers.input_batch_dimension`), asymmetric, + and not narrow range (the range is [Trhs_MIN, Trhs_MAX]). + + `lhs` and `rhs` must be Tensors of same rank, and meet following shape conditions. + - lhs_feature % feature_group_count == 0 + - lhs_feature % rhs_input_feature == 0 + - lhs_feature / feature_group_count == rhs_input_feature + - rhs_output_feature % feature_group_count == 0 + - lhs_batch % batch_group_count == 0 + - rhs_output_feature % batch_group_count == 0 + + `rhs` must be quantized Tensor, where its data value is quantized using the formula: + quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`. + Must be a non-quantized Tensor of `Tlhs`, rank >= 3. + rhs: A `Tensor`. 
Must be one of the following types: `qint8`. + Must be a quantized Tensor of `Trhs`, same rank as `lhs`. + rhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale factors when quantizing the original data that `rhs` represents. + Must be a scalar Tensor for per-tensor quantization, + or 1D Tensor of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization. + rhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point when quantizing original data that `rhs` represents. + Same shape condition as `rhs_scales`. + Tout: A `tf.DType` from: `tf.float32`. The type of output Tensor. + padding: A `string`. + string from: `"SAME"`, `"VALID"`, or `"EXPLICIT"`, indicating the type of padding algorithm to use. + rhs_quantization_min_val: An `int`. + The min value of the quantized data stored in `rhs`. + For example, if `Trhs` is qint8, this must be set to -127 if narrow range quantized or -128 if not. + rhs_quantization_max_val: An `int`. + The max value of the quantized data stored in `rhs`. + For example, if `Trhs` is qint8, this must be set to 127. + window_strides: An optional list of `ints`. Defaults to `[]`. + The stride of the sliding window for each spatial dimension of `lhs`. + Must be an empty list (default) or a list of size (number of spatial dimensions). + If an empty list is provided, the stride for each spatial dimension is set to 1. + explicit_padding: An optional list of `ints`. Defaults to `[]`. + If `padding` Attr is `"EXPLICIT"`, must be set as a list indicating + the explicit paddings at the start and end of each lhs spatial dimension. + Otherwise, this Attr is must be empty. + + (If used,) Must be a list of size 2 * (number of lhs spatial dimensions), + where (explicit_padding[2 * i], explicit_padding[2 * i + 1]) indicates + spatial_dimensions[i] (start_padding, end_padding). + lhs_dilation: An optional list of `ints`. Defaults to `[]`. 
+ The dilation factor to apply in each spatial dimension of `lhs`. + Must be an empty list (default) or a list of size (number of lhs spatial dimensions). + If empty list, the dilation for each lhs spatial dimension is set to 1. + rhs_dilation: An optional list of `ints`. Defaults to `[]`. + The dilation factor to apply in each spatial dimension of `rhs`. + Must be an empty list (default) or a list of size (number of rhs spatial dimensions). + If empty list, the dilation for each rhs spatial dimension is set to 1. + batch_group_count: An optional `int`. Defaults to `1`. + The number of batch groups. Used for grouped filters. + Must be a divisor of output_feature. + feature_group_count: An optional `int`. Defaults to `1`. + The number of feature groups. Used for grouped convolutions. + Must be a divisor of both lhs_feature and output_feature. + dimension_numbers: An optional `string`. Defaults to `""`. + Structure of dimension information for the convolution op. + Must be an empty string (default) or a serialized string of tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr proto. + If empty string, the default is `("NCHW", "OIHW", "NCHW")` (for a 2D convolution). + rhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For the `rhs`, only per-tensor quantization + or per-channel quantization along kernel_output_feature_dimension is supported. + Thus, this attribute must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. + Other values will raise error at OpKernel construction. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedConvolutionHybrid", name, lhs, rhs, rhs_scales, + rhs_zero_points, "Tout", Tout, "window_strides", window_strides, + "padding", padding, "explicit_padding", explicit_padding, + "lhs_dilation", lhs_dilation, "rhs_dilation", rhs_dilation, + "batch_group_count", batch_group_count, "feature_group_count", + feature_group_count, "dimension_numbers", dimension_numbers, + "rhs_quantization_axis", rhs_quantization_axis, + "rhs_quantization_min_val", rhs_quantization_min_val, + "rhs_quantization_max_val", rhs_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_convolution_hybrid_eager_fallback( + lhs, rhs, rhs_scales, rhs_zero_points, Tout=Tout, + window_strides=window_strides, padding=padding, + explicit_padding=explicit_padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, batch_group_count=batch_group_count, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + rhs_quantization_axis=rhs_quantization_axis, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ Tout = _execute.make_type(Tout, "Tout") + padding = _execute.make_str(padding, "padding") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + if window_strides is None: + window_strides = [] + if not isinstance(window_strides, (list, tuple)): + raise TypeError( + "Expected list for 'window_strides' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % window_strides) + window_strides = [_execute.make_int(_i, "window_strides") for _i in window_strides] + if explicit_padding is None: + explicit_padding = [] + if not isinstance(explicit_padding, (list, tuple)): + raise TypeError( + "Expected list for 'explicit_padding' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % explicit_padding) + explicit_padding = [_execute.make_int(_i, "explicit_padding") for _i in explicit_padding] + if lhs_dilation is None: + lhs_dilation = [] + if not isinstance(lhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'lhs_dilation' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % lhs_dilation) + lhs_dilation = [_execute.make_int(_i, "lhs_dilation") for _i in lhs_dilation] + if rhs_dilation is None: + rhs_dilation = [] + if not isinstance(rhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'rhs_dilation' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." 
% rhs_dilation) + rhs_dilation = [_execute.make_int(_i, "rhs_dilation") for _i in rhs_dilation] + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + if feature_group_count is None: + feature_group_count = 1 + feature_group_count = _execute.make_int(feature_group_count, "feature_group_count") + if dimension_numbers is None: + dimension_numbers = "" + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedConvolutionHybrid", lhs=lhs, rhs=rhs, + rhs_scales=rhs_scales, + rhs_zero_points=rhs_zero_points, + Tout=Tout, padding=padding, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + window_strides=window_strides, + explicit_padding=explicit_padding, + lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + batch_group_count=batch_group_count, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + rhs_quantization_axis=rhs_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tlhs", _op._get_attr_type("Tlhs"), "Trhs", + _op._get_attr_type("Trhs"), "Tout", _op._get_attr_type("Tout"), + "window_strides", _op.get_attr("window_strides"), "padding", + _op.get_attr("padding"), "explicit_padding", + _op.get_attr("explicit_padding"), "lhs_dilation", + _op.get_attr("lhs_dilation"), "rhs_dilation", + _op.get_attr("rhs_dilation"), "batch_group_count", + _op._get_attr_int("batch_group_count"), "feature_group_count", + _op._get_attr_int("feature_group_count"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "rhs_quantization_axis", + _op._get_attr_int("rhs_quantization_axis"), + 
"rhs_quantization_min_val", + _op._get_attr_int("rhs_quantization_min_val"), + "rhs_quantization_max_val", + _op._get_attr_int("rhs_quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedConvolutionHybrid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedConvolutionHybrid = tf_export("raw_ops.UniformQuantizedConvolutionHybrid")(_ops.to_raw_op(uniform_quantized_convolution_hybrid)) + + +def uniform_quantized_convolution_hybrid_eager_fallback(lhs: Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Tlhs], rhs: Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Trhs], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedConvolutionHybrid_Tout, padding: str, rhs_quantization_min_val: int, rhs_quantization_max_val: int, window_strides, explicit_padding, lhs_dilation, rhs_dilation, batch_group_count: int, feature_group_count: int, dimension_numbers: str, rhs_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedConvolutionHybrid_Tout]: + Tout = _execute.make_type(Tout, "Tout") + padding = _execute.make_str(padding, "padding") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + if window_strides is None: + window_strides = [] + if not isinstance(window_strides, (list, tuple)): + raise TypeError( + "Expected list for 'window_strides' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % window_strides) + window_strides = [_execute.make_int(_i, "window_strides") for _i in window_strides] + if explicit_padding is None: + explicit_padding = [] + if not isinstance(explicit_padding, (list, tuple)): + raise TypeError( + "Expected list for 'explicit_padding' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." 
% explicit_padding) + explicit_padding = [_execute.make_int(_i, "explicit_padding") for _i in explicit_padding] + if lhs_dilation is None: + lhs_dilation = [] + if not isinstance(lhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'lhs_dilation' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % lhs_dilation) + lhs_dilation = [_execute.make_int(_i, "lhs_dilation") for _i in lhs_dilation] + if rhs_dilation is None: + rhs_dilation = [] + if not isinstance(rhs_dilation, (list, tuple)): + raise TypeError( + "Expected list for 'rhs_dilation' argument to " + "'uniform_quantized_convolution_hybrid' Op, not %r." % rhs_dilation) + rhs_dilation = [_execute.make_int(_i, "rhs_dilation") for _i in rhs_dilation] + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + if feature_group_count is None: + feature_group_count = 1 + feature_group_count = _execute.make_int(feature_group_count, "feature_group_count") + if dimension_numbers is None: + dimension_numbers = "" + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + _attr_Tlhs, (lhs,) = _execute.args_to_matching_eager([lhs], ctx, [_dtypes.float32, ]) + _attr_Trhs, (rhs,) = _execute.args_to_matching_eager([rhs], ctx, [_dtypes.qint8, ]) + rhs_scales = _ops.convert_to_tensor(rhs_scales, _dtypes.float32) + rhs_zero_points = _ops.convert_to_tensor(rhs_zero_points, _dtypes.int32) + _inputs_flat = [lhs, rhs, rhs_scales, rhs_zero_points] + _attrs = ("Tlhs", _attr_Tlhs, "Trhs", _attr_Trhs, "Tout", Tout, + "window_strides", window_strides, "padding", padding, "explicit_padding", + explicit_padding, "lhs_dilation", lhs_dilation, "rhs_dilation", + rhs_dilation, "batch_group_count", batch_group_count, "feature_group_count", + 
feature_group_count, "dimension_numbers", dimension_numbers, + "rhs_quantization_axis", rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val) + _result = _execute.execute(b"UniformQuantizedConvolutionHybrid", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedConvolutionHybrid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedDot_Tin = TypeVar("TV_UniformQuantizedDot_Tin", bound=_atypes.QInt8) +TV_UniformQuantizedDot_Tout = TypeVar("TV_UniformQuantizedDot_Tout", bound=_atypes.QInt32) + +def uniform_quantized_dot(lhs: Annotated[Any, TV_UniformQuantizedDot_Tin], rhs: Annotated[Any, TV_UniformQuantizedDot_Tin], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedDot_Tout, lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, lhs_quantization_axis:int=-1, rhs_quantization_axis:int=-1, output_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedDot_Tout]: + r"""Perform quantized dot of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`. + + Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs` and `rhs` to make quantized `output`. + `lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). 
+ `lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: + quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + `output` is also quantized, using the same formula. + If `rhs` is per-tensor quantized, `output` must be also per-tensor quantized. + + Args: + lhs: A `Tensor`. Must be one of the following types: `qint8`. + Must be a 2D Tensor of Tin. + rhs: A `Tensor`. Must have the same type as `lhs`. + Must be a 2D Tensor of Tin. + lhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale when quantizing original data that lhs represents. + Must be a scalar Tensor (lhs supports only per-tensor quantization). + lhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point when quantizing original data that lhs represents. + Same shape condition as lhs_scales. + rhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale when quantizing original data that rhs represents. + Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization). + rhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point when quantizing original data that rhs represents. + Same shape condition as rhs_scales. + output_scales: A `Tensor` of type `float32`. + The float value(s) to use as scales when quantizing original data that output represents. + Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (output.dim_size(1),) (per-channel quantization). + If rhs is per-tensor quantized, output must be also per-tensor quantized. + This means that if rhs_scales and rhs_zero_points are scalar Tensors, output_scales and output_zero_points must be scalar Tensors as well. + output_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point when quantizing original data that output represents. + Same shape condition as rhs_scales. 
+ Tout: A `tf.DType` from: `tf.qint32`. The type of output Tensor. + lhs_quantization_min_val: An `int`. + The min value of the quantized data stored in lhs. + For example, if Tin is qint8, this must be set to -127 if narrow range quantized or -128 if not. + lhs_quantization_max_val: An `int`. + The max value of the quantized data stored in rhs. + For example, if Tin is qint8, this must be set to 127. + rhs_quantization_min_val: An `int`. + The min value of the quantized data stored in rhs. + For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not. + rhs_quantization_max_val: An `int`. + The max value of the quantized data stored in rhs. + For example, if Trhs is qint8, this must be set to 127. + output_quantization_min_val: An `int`. + The min value of the quantized data stored in output. + For example, if Tout is qint8, this must be set to -127 if narrow range quantized or -128 if not. + output_quantization_max_val: An `int`. + The max value of the quantized data stored in output. + For example, if Tout is qint8, this must be set to 127. + lhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For dot op lhs, only per-tensor quantization is supported. + Thus, this attribute must be set to -1. Other values are rejected. + rhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported. + Thus, this attribute must be set to -1 or 1. Other values are rejected. + output_quantization_axis: An optional `int`. Defaults to `-1`. 
+ Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For dot op output, only per-tensor quantization or per-channel quantization along dimension 1 is supported. + Thus, this attribute must be set to -1 or 1. Other values are rejected. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedDot", name, lhs, rhs, lhs_scales, + lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, + output_zero_points, "Tout", Tout, "lhs_quantization_axis", + lhs_quantization_axis, "lhs_quantization_min_val", + lhs_quantization_min_val, "lhs_quantization_max_val", + lhs_quantization_max_val, "rhs_quantization_axis", + rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val, "output_quantization_axis", + output_quantization_axis, "output_quantization_min_val", + output_quantization_min_val, "output_quantization_max_val", + output_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_dot_eager_fallback( + lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, + output_scales, output_zero_points, Tout=Tout, + lhs_quantization_axis=lhs_quantization_axis, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_axis=rhs_quantization_axis, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_axis=output_quantization_axis, + 
output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedDot", lhs=lhs, rhs=rhs, lhs_scales=lhs_scales, + lhs_zero_points=lhs_zero_points, + rhs_scales=rhs_scales, + rhs_zero_points=rhs_zero_points, + output_scales=output_scales, + output_zero_points=output_zero_points, + Tout=Tout, + lhs_quantization_min_val=lhs_quantization_min_val, + lhs_quantization_max_val=lhs_quantization_max_val, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + output_quantization_min_val=output_quantization_min_val, + 
output_quantization_max_val=output_quantization_max_val, + lhs_quantization_axis=lhs_quantization_axis, + rhs_quantization_axis=rhs_quantization_axis, + output_quantization_axis=output_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op._get_attr_type("Tin"), "Tout", + _op._get_attr_type("Tout"), "lhs_quantization_axis", + _op._get_attr_int("lhs_quantization_axis"), + "lhs_quantization_min_val", + _op._get_attr_int("lhs_quantization_min_val"), + "lhs_quantization_max_val", + _op._get_attr_int("lhs_quantization_max_val"), + "rhs_quantization_axis", + _op._get_attr_int("rhs_quantization_axis"), + "rhs_quantization_min_val", + _op._get_attr_int("rhs_quantization_min_val"), + "rhs_quantization_max_val", + _op._get_attr_int("rhs_quantization_max_val"), + "output_quantization_axis", + _op._get_attr_int("output_quantization_axis"), + "output_quantization_min_val", + _op._get_attr_int("output_quantization_min_val"), + "output_quantization_max_val", + _op._get_attr_int("output_quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedDot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedDot = tf_export("raw_ops.UniformQuantizedDot")(_ops.to_raw_op(uniform_quantized_dot)) + + +def uniform_quantized_dot_eager_fallback(lhs: Annotated[Any, TV_UniformQuantizedDot_Tin], rhs: Annotated[Any, TV_UniformQuantizedDot_Tin], lhs_scales: Annotated[Any, _atypes.Float32], lhs_zero_points: Annotated[Any, _atypes.Int32], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedDot_Tout, lhs_quantization_min_val: int, lhs_quantization_max_val: int, rhs_quantization_min_val: int, rhs_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, 
lhs_quantization_axis: int, rhs_quantization_axis: int, output_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedDot_Tout]: + Tout = _execute.make_type(Tout, "Tout") + lhs_quantization_min_val = _execute.make_int(lhs_quantization_min_val, "lhs_quantization_min_val") + lhs_quantization_max_val = _execute.make_int(lhs_quantization_max_val, "lhs_quantization_max_val") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if lhs_quantization_axis is None: + lhs_quantization_axis = -1 + lhs_quantization_axis = _execute.make_int(lhs_quantization_axis, "lhs_quantization_axis") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _attr_Tin, _inputs_Tin = _execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.qint8, ]) + (lhs, rhs) = _inputs_Tin + lhs_scales = _ops.convert_to_tensor(lhs_scales, _dtypes.float32) + lhs_zero_points = _ops.convert_to_tensor(lhs_zero_points, _dtypes.int32) + rhs_scales = _ops.convert_to_tensor(rhs_scales, _dtypes.float32) + rhs_zero_points = _ops.convert_to_tensor(rhs_zero_points, _dtypes.int32) + output_scales = _ops.convert_to_tensor(output_scales, _dtypes.float32) + output_zero_points = _ops.convert_to_tensor(output_zero_points, _dtypes.int32) + _inputs_flat = [lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points] + _attrs = ("Tin", 
_attr_Tin, "Tout", Tout, "lhs_quantization_axis", + lhs_quantization_axis, "lhs_quantization_min_val", lhs_quantization_min_val, + "lhs_quantization_max_val", lhs_quantization_max_val, + "rhs_quantization_axis", rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val, "output_quantization_axis", + output_quantization_axis, "output_quantization_min_val", + output_quantization_min_val, "output_quantization_max_val", + output_quantization_max_val) + _result = _execute.execute(b"UniformQuantizedDot", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedDot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformQuantizedDotHybrid_Tlhs = TypeVar("TV_UniformQuantizedDotHybrid_Tlhs", bound=_atypes.Float32) +TV_UniformQuantizedDotHybrid_Trhs = TypeVar("TV_UniformQuantizedDotHybrid_Trhs", bound=_atypes.QInt8) +TV_UniformQuantizedDotHybrid_Tout = TypeVar("TV_UniformQuantizedDotHybrid_Tout", bound=_atypes.Float32) + +def uniform_quantized_dot_hybrid(lhs: Annotated[Any, TV_UniformQuantizedDotHybrid_Tlhs], rhs: Annotated[Any, TV_UniformQuantizedDotHybrid_Trhs], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedDotHybrid_Tout, rhs_quantization_min_val: int, rhs_quantization_max_val: int, rhs_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformQuantizedDotHybrid_Tout]: + r"""Perform hybrid quantized dot of float Tensor `lhs` and quantized Tensor `rhs`. + + Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`, and then performs quantized dot on quantized lhs and `rhs`. + The internal quantization on `lhs` is a quantization to qint8, dynamic range, per-batch (per-axis along axis 0), asymmetric, and not narrow range (the range is [-128, 127]). 
+ `lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). + `rhs` must be quantized Tensor, where its data value is quantized using the formula: + quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`. + Must be a 2D Tensor of Tlhs. + rhs: A `Tensor`. Must be one of the following types: `qint8`. + Must be a 2D Tensor of Trhs. + rhs_scales: A `Tensor` of type `float32`. + The float value(s) used as scale when quantizing original data that rhs represents. + Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization). + rhs_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point when quantizing original data that rhs represents. + Same shape condition as rhs_scales. + Tout: A `tf.DType` from: `tf.float32`. The type of output Tensor. + rhs_quantization_min_val: An `int`. + The min value of the quantized data stored in rhs. + For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not. + rhs_quantization_max_val: An `int`. + The max value of the quantized data stored in rhs. + For example, if Trhs is qint8, this must be set to 127. + rhs_quantization_axis: An optional `int`. Defaults to `-1`. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. + For dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported. + Thus, this attribute must be set to -1 or 1. Other values are rejected. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformQuantizedDotHybrid", name, lhs, rhs, rhs_scales, + rhs_zero_points, "Tout", Tout, "rhs_quantization_axis", + rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_quantized_dot_hybrid_eager_fallback( + lhs, rhs, rhs_scales, rhs_zero_points, Tout=Tout, + rhs_quantization_axis=rhs_quantization_axis, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformQuantizedDotHybrid", lhs=lhs, rhs=rhs, rhs_scales=rhs_scales, + rhs_zero_points=rhs_zero_points, + Tout=Tout, + rhs_quantization_min_val=rhs_quantization_min_val, + rhs_quantization_max_val=rhs_quantization_max_val, + rhs_quantization_axis=rhs_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tlhs", _op._get_attr_type("Tlhs"), "Trhs", + _op._get_attr_type("Trhs"), "Tout", _op._get_attr_type("Tout"), + "rhs_quantization_axis", + _op._get_attr_int("rhs_quantization_axis"), + 
"rhs_quantization_min_val", + _op._get_attr_int("rhs_quantization_min_val"), + "rhs_quantization_max_val", + _op._get_attr_int("rhs_quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformQuantizedDotHybrid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformQuantizedDotHybrid = tf_export("raw_ops.UniformQuantizedDotHybrid")(_ops.to_raw_op(uniform_quantized_dot_hybrid)) + + +def uniform_quantized_dot_hybrid_eager_fallback(lhs: Annotated[Any, TV_UniformQuantizedDotHybrid_Tlhs], rhs: Annotated[Any, TV_UniformQuantizedDotHybrid_Trhs], rhs_scales: Annotated[Any, _atypes.Float32], rhs_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformQuantizedDotHybrid_Tout, rhs_quantization_min_val: int, rhs_quantization_max_val: int, rhs_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformQuantizedDotHybrid_Tout]: + Tout = _execute.make_type(Tout, "Tout") + rhs_quantization_min_val = _execute.make_int(rhs_quantization_min_val, "rhs_quantization_min_val") + rhs_quantization_max_val = _execute.make_int(rhs_quantization_max_val, "rhs_quantization_max_val") + if rhs_quantization_axis is None: + rhs_quantization_axis = -1 + rhs_quantization_axis = _execute.make_int(rhs_quantization_axis, "rhs_quantization_axis") + _attr_Tlhs, (lhs,) = _execute.args_to_matching_eager([lhs], ctx, [_dtypes.float32, ]) + _attr_Trhs, (rhs,) = _execute.args_to_matching_eager([rhs], ctx, [_dtypes.qint8, ]) + rhs_scales = _ops.convert_to_tensor(rhs_scales, _dtypes.float32) + rhs_zero_points = _ops.convert_to_tensor(rhs_zero_points, _dtypes.int32) + _inputs_flat = [lhs, rhs, rhs_scales, rhs_zero_points] + _attrs = ("Tlhs", _attr_Tlhs, "Trhs", _attr_Trhs, "Tout", Tout, + "rhs_quantization_axis", rhs_quantization_axis, "rhs_quantization_min_val", + rhs_quantization_min_val, "rhs_quantization_max_val", + rhs_quantization_max_val) + _result = _execute.execute(b"UniformQuantizedDotHybrid", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 
+ name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformQuantizedDotHybrid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UniformRequantize_Tin = TypeVar("TV_UniformRequantize_Tin", _atypes.QInt32, _atypes.QInt8) +TV_UniformRequantize_Tout = TypeVar("TV_UniformRequantize_Tout", _atypes.QInt32, _atypes.QInt8) + +def uniform_requantize(input: Annotated[Any, TV_UniformRequantize_Tin], input_scales: Annotated[Any, _atypes.Float32], input_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformRequantize_Tout, input_quantization_min_val: int, input_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, input_quantization_axis:int=-1, output_quantization_axis:int=-1, name=None) -> Annotated[Any, TV_UniformRequantize_Tout]: + r"""Given quantized tensor `input`, requantize it with new quantization parameters. + + Given quantized tensor `input`, which was quantized using {input_scales, input_zero_points, input_quantization_axis, input_quantization_min_val, input_quantization_max_val}, + requantize it to a tensor, which is quantized using {output_scales, output_zero_points, output_quantization_axis, output_quantization_min_val, output_quantization_max_val}. + The requantization is done by using the formula: + output_quantized_data = clip( + (input_quantized_data - input_zero_point) * (input_scale / output_scale) + output_zero_point, + output_quantization_min_val, + output_quantization_max_val) + + Per-tensor and per-axis quantization supported cases are followings: + * per-tensor -> per-tensor + * per-tensor -> per-axis + * per-axis -> per-axis where input_quantization_axis equals output_quantization_axis. + i.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or two must be equal. + + Args: + input: A `Tensor`. 
Must be one of the following types: `qint8`, `qint32`. + Must be a Tensor of Tin. + input_scales: A `Tensor` of type `float32`. + The float value(s) used as scale(s) when quantizing original data that `input` represents. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + input_zero_points: A `Tensor` of type `int32`. + The int32 value(s) used as zero_point(s) when quantizing original data that `input` represents. + Same shape condition as scales. + output_scales: A `Tensor` of type `float32`. + The float value(s) to use as new scale(s) to quantize original data that `input` represents. + Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). + output_zero_points: A `Tensor` of type `int32`. + The int32 value(s) to use as new zero_point(s) to quantize original data that `input` represents. + Same shape condition as scales. + Tout: A `tf.DType` from: `tf.qint8, tf.qint32`. + The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 + input_quantization_min_val: An `int`. + The quantization min value that was used when quantizing original data that `input` represents. + The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: + `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. + For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not. + input_quantization_max_val: An `int`. + The quantization max value that was used when quantizing original data that `input` represents. + The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: + `(Tout max)` for both narrow range and not narrow range. + For example, if Tin is qint8, this is set to 127. + output_quantization_min_val: An `int`. 
+ The new quantization min value to quantize original data that `input` represents. + output_quantization_max_val: An `int`. + The new quantization max value to quantize original data that `input` represents. + input_quantization_axis: An optional `int`. Defaults to `-1`. + The quantization axis that was used when quantizing original data that `input` represents. + Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. + If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). + output_quantization_axis: An optional `int`. Defaults to `-1`. + The new quantization axis to use to quantize original data that `input` represents. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniformRequantize", name, input, input_scales, + input_zero_points, output_scales, output_zero_points, "Tout", Tout, + "input_quantization_axis", input_quantization_axis, + "input_quantization_min_val", input_quantization_min_val, + "input_quantization_max_val", input_quantization_max_val, + "output_quantization_axis", output_quantization_axis, + "output_quantization_min_val", output_quantization_min_val, + "output_quantization_max_val", output_quantization_max_val) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uniform_requantize_eager_fallback( + input, input_scales, input_zero_points, output_scales, + output_zero_points, Tout=Tout, + input_quantization_axis=input_quantization_axis, + input_quantization_min_val=input_quantization_min_val, + input_quantization_max_val=input_quantization_max_val, + output_quantization_axis=output_quantization_axis, + 
output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + Tout = _execute.make_type(Tout, "Tout") + input_quantization_min_val = _execute.make_int(input_quantization_min_val, "input_quantization_min_val") + input_quantization_max_val = _execute.make_int(input_quantization_max_val, "input_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if input_quantization_axis is None: + input_quantization_axis = -1 + input_quantization_axis = _execute.make_int(input_quantization_axis, "input_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniformRequantize", input=input, input_scales=input_scales, + input_zero_points=input_zero_points, + output_scales=output_scales, + output_zero_points=output_zero_points, Tout=Tout, + input_quantization_min_val=input_quantization_min_val, + input_quantization_max_val=input_quantization_max_val, + output_quantization_min_val=output_quantization_min_val, + output_quantization_max_val=output_quantization_max_val, + input_quantization_axis=input_quantization_axis, + output_quantization_axis=output_quantization_axis, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op._get_attr_type("Tin"), "Tout", + _op._get_attr_type("Tout"), "input_quantization_axis", + _op._get_attr_int("input_quantization_axis"), + "input_quantization_min_val", + _op._get_attr_int("input_quantization_min_val"), + "input_quantization_max_val", 
+ _op._get_attr_int("input_quantization_max_val"), + "output_quantization_axis", + _op._get_attr_int("output_quantization_axis"), + "output_quantization_min_val", + _op._get_attr_int("output_quantization_min_val"), + "output_quantization_max_val", + _op._get_attr_int("output_quantization_max_val")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniformRequantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniformRequantize = tf_export("raw_ops.UniformRequantize")(_ops.to_raw_op(uniform_requantize)) + + +def uniform_requantize_eager_fallback(input: Annotated[Any, TV_UniformRequantize_Tin], input_scales: Annotated[Any, _atypes.Float32], input_zero_points: Annotated[Any, _atypes.Int32], output_scales: Annotated[Any, _atypes.Float32], output_zero_points: Annotated[Any, _atypes.Int32], Tout: TV_UniformRequantize_Tout, input_quantization_min_val: int, input_quantization_max_val: int, output_quantization_min_val: int, output_quantization_max_val: int, input_quantization_axis: int, output_quantization_axis: int, name, ctx) -> Annotated[Any, TV_UniformRequantize_Tout]: + Tout = _execute.make_type(Tout, "Tout") + input_quantization_min_val = _execute.make_int(input_quantization_min_val, "input_quantization_min_val") + input_quantization_max_val = _execute.make_int(input_quantization_max_val, "input_quantization_max_val") + output_quantization_min_val = _execute.make_int(output_quantization_min_val, "output_quantization_min_val") + output_quantization_max_val = _execute.make_int(output_quantization_max_val, "output_quantization_max_val") + if input_quantization_axis is None: + input_quantization_axis = -1 + input_quantization_axis = _execute.make_int(input_quantization_axis, "input_quantization_axis") + if output_quantization_axis is None: + output_quantization_axis = -1 + output_quantization_axis = _execute.make_int(output_quantization_axis, "output_quantization_axis") + _attr_Tin, (input,) = _execute.args_to_matching_eager([input], 
ctx, [_dtypes.qint8, _dtypes.qint32, ]) + input_scales = _ops.convert_to_tensor(input_scales, _dtypes.float32) + input_zero_points = _ops.convert_to_tensor(input_zero_points, _dtypes.int32) + output_scales = _ops.convert_to_tensor(output_scales, _dtypes.float32) + output_zero_points = _ops.convert_to_tensor(output_zero_points, _dtypes.int32) + _inputs_flat = [input, input_scales, input_zero_points, output_scales, output_zero_points] + _attrs = ("Tin", _attr_Tin, "Tout", Tout, "input_quantization_axis", + input_quantization_axis, "input_quantization_min_val", + input_quantization_min_val, "input_quantization_max_val", + input_quantization_max_val, "output_quantization_axis", + output_quantization_axis, "output_quantization_min_val", + output_quantization_min_val, "output_quantization_max_val", + output_quantization_max_val) + _result = _execute.execute(b"UniformRequantize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniformRequantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradient_checker_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradient_checker_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..85f9f2f5cea46887df593033ab53b2b647ee5631 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradient_checker_v2.py @@ -0,0 +1,364 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gradient checker for functions. + +The gradient checker verifies numerically that an function properly +computes the gradients +""" +import numpy as np + +from tensorflow.python.eager import backprop +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gradients_impl # pylint: disable=unused-import +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util.tf_export import tf_export + + +def _product(t): + if isinstance(t, int): + return t + else: + y = 1 + for x in t: + y *= x + return y + + +def _eval_indexed_slices(a): + """Converts IndexedSlices to IndexedSlicesValue with numpy indices/values. + + When eager execution is enabled, converts IndexedSlices + to IndexedSlicesValue with numpy indices/values. + + Args: + a: any value. + + Returns: + If a is IndexedSlices and eager execution is enabled, calls numpy() on a's + fields. Otherwise returns a unchanged. 
+ """ + if (isinstance(a, indexed_slices.IndexedSlices) and + context.executing_eagerly()): + return indexed_slices.IndexedSlicesValue( + indices=[x.numpy() for x in a.indices], + values=[x.numpy() for x in a.values], + dense_shape=a.dense_shape) + return a + + +def _to_numpy(a): + """Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays. + + Args: + a: any value. + + Returns: + If a is EagerTensor or Tensor, returns the evaluation of a by calling + numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding + dense numpy array. Otherwise returns a unchanged. + """ + if isinstance(a, ops.EagerTensor): + return a.numpy() + if isinstance(a, tensor.Tensor): + sess = ops.get_default_session() + return sess.run(a) + if isinstance(a, indexed_slices.IndexedSlicesValue): + arr = np.zeros(a.dense_shape) + assert len(a.values) == len(a.indices), ( + "IndexedSlicesValue has %s value slices but %s indices\n%s" % + (a.values, a.indices, a)) + for values_slice, index in zip(a.values, a.indices): + assert 0 <= index < len(arr), ( + "IndexedSlicesValue has invalid index %s\n%s" % (index, a)) + arr[index] += values_slice + return arr + return a + + +def _prepare(f, xs_dtypes, xs_shapes): + """Return a function that executes 'f'. + + In TF 2.x, this is the same as `f`. + In TF 1.x, returns a Python function that executes the graph defined by `f` + in a Session. + + Args: + f: the function. + xs_dtypes: dtypes of f's arguments. + xs_shapes: shapes of f's arguments. 
+ + Returns: + """ + if context.executing_eagerly(): + + def decorated_eager(*xs_data): + return f(*map(ops.convert_to_tensor, xs_data)) + + return decorated_eager + xs = [ + array_ops.placeholder(x_dtype, shape=x_shape) + for x_dtype, x_shape in zip(xs_dtypes, xs_shapes) + ] + y = f(*xs) + sess = ops.get_default_session() + + def decorated_graph(*xs_data): + xs_data = [_to_numpy(a) for a in xs_data] + return sess.run(y, feed_dict=dict(zip(xs, xs_data))) + + return decorated_graph + + +def _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param): + """Computes the theoretical Jacobian for f regarding xs[param]. + + One can think of the relation among f, xs and y as y = f(xs). + + Args: + f: the function. + y_shape: the shape of the result. + y_dtype: the dtype of the result. + xs: a list of tensors. + param: the index of the target parameter. + + Returns: + A 2-d numpy array representing the Jacobian. It has "y_size" rows + and "x_size" columns where "x_size" is the number of elements in xs[param] + and "y_size" is the number of elements in the result. + + Raises: + ValueError: If result is empty but the gradient is nonzero. + """ + x = xs[param] + # Complex vectors are treated as vectors of twice as many reals. + x_shape = tuple(x.shape) + (2,) if x.dtype.is_complex else x.shape + y_factor = 2 if y_dtype.is_complex else 1 + + # To compute the jacobian, we treat x and y as one-dimensional vectors. + x_size = _product(x_shape) + x_val_size = _product(x_shape[1:]) # This is used for sparse gradients + y_size = _product(y_shape) * y_factor + + # Allocate 2-D Jacobian, with y dimensions smashed into the first + # dimension and x dimensions smashed into the second. + jacobian = np.zeros((y_size, x_size), dtype=x.dtype.real_dtype.as_numpy_dtype) + + # For each of the entry of dy, we set this to be 1 and + # everything else to be 0 and compute the gradients -- this will give us one + # row of the Jacobian matrix. 
+ dy_data = np.zeros(y_shape, dtype=y_dtype.as_numpy_dtype) + dy_data_flat = dy_data.ravel().view(y_dtype.real_dtype.as_numpy_dtype) + grad_fn_unprep = backprop.gradients_function(f, [param]) + grad_fn = _prepare(lambda dy, *xs: grad_fn_unprep(*xs, dy=dy), + [y_dtype] + [z.dtype for z in xs], + [None] + [z.shape for z in xs]) + for row in range(y_size): + dy_data_flat[row] = 1 + grad = _to_numpy(grad_fn(dy_data, *xs)[0]) + grad = _eval_indexed_slices(grad) + if isinstance(grad, indexed_slices.IndexedSlicesValue): + for i, v in zip(grad.indices, grad.values): + c_begin = i * x_val_size + c_end = c_begin + x_val_size + jacobian[row, c_begin:c_end] += v.flat + elif grad is not None: + jacobian[row, :] = grad.ravel().view(jacobian.dtype) + # This reset of `dy_data_flat` needs to happen after `grad` is copied to + # `jacobian` because `grad` and `dy_data_flat` may share memory. + dy_data_flat[row] = 0 + + # If the output is empty, run the gradients at least once and make sure + # they produce zeros. + if y_size == 0: # don't use 'not y_size', because y_size may not be an int + grad = _to_numpy(grad_fn(dy_data, *xs)[0]) + if grad.shape != x.shape: + raise ValueError("Empty gradient has wrong shape: expected %s, got %s" % + (x.shape, grad.shape)) + if np.any(grad): + raise ValueError("Empty tensor with nonzero gradients") + + logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian) + return jacobian + + +def _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta): + """Computes the numeric Jacobian for f regarding xs[param]. + + One can think of the relation among f, xs and y as y = f(xs). + + Args: + f: the function. + y_size: the number of elements of the result. + y_dtype: the dtype of the result. + xs: a list of tensors. + param: the index of the target parameter. + delta: the amount of perturbation we give to the input. + + Returns: + A 2-d numpy array representing the Jacobian. 
It has "y_size" rows + and "x_size" columns where "x_size" is the number of elements in xs[param] + and "y_size" is the number of elements in the result. + """ + x_shape = xs[param].shape + x_dtype = xs[param].dtype + + # To compute the jacobian, we treat x and y as one-dimensional vectors + x_size = _product(x_shape) * (2 if x_dtype.is_complex else 1) + y_size = y_size * (2 if y_dtype.is_complex else 1) + x_dtype = x_dtype.real_dtype.as_numpy_dtype + y_dtype = y_dtype.real_dtype.as_numpy_dtype + + xs_dtypes = [x.dtype for x in xs] + xs_shapes = [x.shape for x in xs] + # Converts xs to numpy arrays to do in-place perturbation. + # Calls asarray() to avoid copying in ravel() later. + xs = [np.asarray(_to_numpy(x)) for x in xs] + x = xs[param] + + # Make sure we have the right types + scale = np.asarray(2 * delta, dtype=y_dtype)[()] + + jacobian = np.zeros((y_size, x_size), dtype=x_dtype) + + # For each of the entry of x, we slightly perturbs this by adding and + # subtracting a delta and then compute difference between the outputs. This + # will give us one column of the Jacobian matrix. 
+ f = _prepare(f, xs_dtypes, xs_shapes) + for col in range(x_size): + original = x.ravel().view(x_dtype)[col] + x.ravel().view(x_dtype)[col] += delta + y_pos = _to_numpy(f(*xs)) + x.ravel().view(x_dtype)[col] = original + x.ravel().view(x_dtype)[col] -= delta + y_neg = _to_numpy(f(*xs)) + x.ravel().view(x_dtype)[col] = original + diff = (y_pos - y_neg) / scale + jacobian[:, col] = diff.ravel().view(y_dtype) + + logging.vlog(1, "Numeric Jacobian =\n%s", jacobian) + return jacobian + + +def _compute_gradient(f, y_shape, y_dtype, xs, param, delta): + """Computes the theoretical and numerical jacobian.""" + x = xs[param] + t = x.dtype + allowed_types = [ + dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, + dtypes.complex64, dtypes.complex128 + ] + assert t.base_dtype in allowed_types, ("Cannot compute gradient for " + "unsupported type %s of argument %s" % + (t.name, param)) + t2 = y_dtype + assert t2.base_dtype in allowed_types, ("Cannot compute gradient for " + "unsupported type %s of y" % t2.name) + y_size = _product(y_shape) + jacob_t = _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param) + jacob_n = _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta) + return jacob_t, jacob_n + + +def _compute_gradient_list(f, xs, delta): + """Compute gradients for a list of x values.""" + # convert xs to tensors so that dtype and shape have uniform types + xs = [ops.convert_to_tensor(x) for x in xs] + # run the function to get info of the result + xs_dtypes = [x.dtype for x in xs] + xs_shapes = [x.shape for x in xs] + f_temp = _prepare(f, xs_dtypes, xs_shapes) + y = f_temp(*xs) + return tuple( + zip(*[ + _compute_gradient(f, y.shape, dtypes.as_dtype(y.dtype), xs, i, delta) + for i in range(len(xs)) + ])) + + +@tf_export("test.compute_gradient", v1=[]) +def compute_gradient(f, x, delta=None): + """Computes the theoretical and numeric Jacobian of `f`. + + With y = f(x), computes the theoretical and numeric Jacobian dy/dx. 
+ + Args: + f: the function. + x: the arguments for the function as a list or tuple of values convertible + to a Tensor. + delta: (optional) perturbation used to compute numeric Jacobian. + + Returns: + A pair of lists, where the first is a list of 2-d numpy arrays representing + the theoretical Jacobians for each argument, and the second list is the + numerical ones. Each 2-d array has "y_size" rows + and "x_size" columns where "x_size" is the number of elements in the + corresponding argument and "y_size" is the number of elements in f(x). + + Raises: + ValueError: If result is empty but the gradient is nonzero. + ValueError: If x is not list, but any other type. + + Example: + + >>> @tf.function + ... def test_func(x): + ... return x*x + ... + >>> + >>> class MyTest(tf.test.TestCase): + ... + ... def test_gradient_of_test_func(self): + ... theoretical, numerical = tf.test.compute_gradient(test_func, [1.0]) + ... # ((array([[2.]], dtype=float32),), + ... # (array([[2.000004]], dtype=float32),)) + ... self.assertAllClose(theoretical, numerical) + + """ + if not isinstance(x, (list, tuple)): + raise ValueError( + "`x` must be a list or tuple of values convertible to a Tensor " + "(arguments to `f`), not a %s" % type(x)) + if delta is None: + # By default, we use a step size for the central finite difference + # approximation that is exactly representable as a binary floating + # point number, since this reduces the amount of noise due to rounding + # in the approximation of some functions. + delta = 1.0 / 1024 + return _compute_gradient_list(f, x, delta) + + +def max_error(grad1, grad2): + """Computes maximum elementwise gap. + + Computes the maximum elementwise gap between two lists of tensors of the same + shape. + + Args: + grad1: a lists of tensors. + grad2: a lists of tensors with the same shape as grad1. + + Returns: + The maximum elementwise gap between the two. 
+ """ + error = 0 + for j_t, j_n in zip(grad1, grad2): + if j_t.size or j_n.size: # Handle zero size tensors correctly + error = np.maximum(error, np.fabs(j_t - j_n).max()) + return error diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradients_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradients_util.py new file mode 100644 index 0000000000000000000000000000000000000000..fa568ea706cf3633f9fe94369007ccdd5ac52eb4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gradients_util.py @@ -0,0 +1,1089 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Implements the graph generation for computation of gradients.""" + +import collections +import contextlib + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.python import pywrap_tfe +from tensorflow.python.eager import backprop_util +from tensorflow.python.eager import context +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import control_flow_state +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import default_gradient +from tensorflow.python.ops import gen_functional_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import compat +from tensorflow.python.util import object_identity +from tensorflow.python.util import variable_utils +from tensorflow.python.util.compat import collections_abc +from tensorflow.python.util.tf_export import tf_export + + +def _MarkReachedOps(from_ops, reached_ops, func_graphs): + """Mark all ops reached from "from_ops". + + Args: + from_ops: list of Operations. + reached_ops: set of Operations. + func_graphs: list of FuncGraphs. This method will traverse through + these functions if they capture from_ops or any reachable ops. 
+ """ + queue = collections.deque() + queue.extend(from_ops) + while queue: + op = queue.popleft() + if op not in reached_ops: + reached_ops.add(op) + for output in op.outputs: + if backprop_util.IsTrainable(output): + queue.extend(_Consumers(output, func_graphs)) + + +def _PendingCount( + to_ops: list[ops.Operation], + from_ops: list[ops.Operation], + colocate_gradients_with_ops, + func_graphs, + xs_set, +): + """Initialize the pending count for ops between two lists of Operations. + + 'pending_count[op]' indicates the number of backprop inputs + to this operation. + + Args: + to_ops: list of Operations. + from_ops: list of Operations. + colocate_gradients_with_ops: Python bool. See docstring of gradients(). + func_graphs: list of FuncGraphs. This method will traverse through + these functions if they capture from_ops or any reachable ops. This is + useful if to_ops occur in a function and from_ops are in an outer function + or graph. + xs_set: ObjectIdentitySet of Tensors. + + Returns: + A tuple containing: (1) the subset of to_ops reachable from from_ops by a + path of zero or more backpropagatable tensors, (2) a mapping from operation + to the number of backprop inputs to that op, and (3) a ControlFlowState + object which is not None if the ops between from_ops and to_ops contain + control flow loops. + """ + # Mark reachable ops from from_ops. + reached_ops = set() + _MarkReachedOps(from_ops, reached_ops, func_graphs) + # X in reached_ops iff X is reachable from from_ops by a path of zero or more + # backpropagatable tensors. + + reachable_to_ops = set(op for op in to_ops if op in reached_ops) + + # Mark between ops. + between_ops = set() + between_op_list = [] + queue = collections.deque() + queue.extend(to_ops) + while queue: + op = queue.popleft() + # We are interested in this op. + if op in reached_ops: + between_ops.add(op) + between_op_list.append(op) + # Clear the boolean so we won't add the inputs again. 
+ reached_ops.remove(op) + for inp in _NonEagerInputs(op, xs_set): + queue.append(inp.op) + # X in between_ops iff X is on a path of zero or more backpropagatable tensors + # between from_ops and to_ops + + # 'loop_state' is None if there are no while loops. + loop_state = control_flow_state.MaybeCreateControlFlowState( + between_op_list, between_ops, colocate_gradients_with_ops) + + # Initialize pending count for between ops. + pending_count = collections.defaultdict(int) + for op in between_op_list: + for x in _NonEagerInputs(op, xs_set): + if x.op in between_ops: + pending_count[x.op] += 1 + + return reachable_to_ops, pending_count, loop_state + + +def _AsList(x): + return x if isinstance(x, (list, tuple)) else [x] + + +def _DefaultGradYs(grad_ys, + ys, + colocate_gradients_with_ops, + gradient_uid="__unsupported__"): + """Fill in default values for grad_ys. + + Args: + grad_ys: List of gradients, can contain None. + ys: List of tensors. + colocate_gradients_with_ops: If True, try colocating gradients with + the corresponding op. + gradient_uid: A unique identifier within the graph indicating + which invocation of gradients is being executed. Used to cluster + ops for compilation. + + Returns: + A list of gradients to use, without None. + + Raises: + ValueError: If sizes of gradients and inputs don't match + TypeError: If type of any gradient is not valid for its input. + """ + if len(grad_ys) != len(ys): + raise ValueError(f"Length mismatch. 
Passed {len(grad_ys)} grad_ys for " + f"{len(ys)} ys") + grad_ys = indexed_slices.convert_n_to_tensor_or_indexed_slices( + grad_ys, name="grad_y") + new_grad_ys = [] + for i, (y, grad_y) in enumerate(zip(ys, grad_ys)): + with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops): + if grad_y is None: + if y.dtype.is_complex: + raise TypeError( + f"Gradients of complex tensors ({y}) must set grad_ys (y.dtype = " + f"{dtypes.as_dtype(y.dtype).name})" + ) + new_grad_ys.append( + array_ops.ones( + array_ops.shape(y), dtype=y.dtype, name="grad_ys_%d" % i + ) + ) + continue + if y.dtype.is_floating or y.dtype.is_integer: + if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer: + raise TypeError( + f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated " + f"for real or integer-valued tensor {y} with type " + f"{dtypes.as_dtype(y.dtype).name} must be real or integer" + ) + elif y.dtype.is_complex: + if not grad_y.dtype.is_complex: + raise TypeError( + f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated " + f"for complex-valued tensor {y} with type " + f"{dtypes.as_dtype(y.dtype).name} must be real" + ) + elif y.dtype == dtypes.variant: + if grad_y.dtype != dtypes.variant: + raise TypeError( + f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated " + f"for variant tensor {y} with type " + f"{dtypes.as_dtype(y.dtype).name} must be variant" + ) + elif y.dtype == dtypes.resource: + # We assume y is the handle of a ResourceVariable. The gradient of a + # ResourceVariable should be a numeric value, not another resource. + if grad_y.dtype == dtypes.resource: + raise TypeError( + f"Input gradient {grad_y} for resource tensor {y} " + "should not be a resource" + ) + else: + raise TypeError( + f"Tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be " + "numeric to obtain a default gradient" + ) + # Create a grad_y tensor in the name scope of the gradient. 
+ # Required for TensorArrays to identify which gradient call a + # grad_y value is coming from. + if isinstance(grad_y, indexed_slices.IndexedSlices): + new_grad_ys.append( + indexed_slices.IndexedSlices( + indices=( + array_ops.identity( + grad_y.indices, name="grad_ys_%d_indices" % i + ) + if isinstance(grad_y.indices, tensor_lib.Tensor) + else grad_y.indices + ), + values=( + array_ops.identity( + grad_y.values, name="grad_ys_%d_values" % i + ) + if isinstance(grad_y.values, tensor_lib.Tensor) + else grad_y.values + ), + dense_shape=( + array_ops.identity( + grad_y.dense_shape, name="grad_ys_%d_shape" % i + ) + if isinstance(grad_y.dense_shape, tensor_lib.Tensor) + else grad_y.dense_shape + ), + ) + ) + else: + new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i)) + + return new_grad_ys + + +def _VerifyGeneratedGradients(grads, op: ops.Operation): + """Verify that gradients are valid in number and type. + + Args: + grads: List of generated gradients. + op: Operation for which the gradients where generated. + + Raises: + ValueError: if sizes of gradients and inputs don't match. + TypeError: if type of any gradient is not valid for its input. + """ + # While ops have inputs added to them during the gradient computation, so we + # skip the below check. See while_v2 for details. + if op.type == "While" or op.type == "StatelessWhile": + return + + if len(grads) != len(op.inputs): + raise ValueError( + f"Num gradients {len(grads)} generated for op " + f"{op.node_def} do not match num inputs {len(op.inputs)}" + ) + + +def _StopOps( + from_ops: list[ops.Operation], + stop_gradient_ops: list[ops.Operation], + pending_count, + xs_set, +): + """The set of ops that terminate the gradient computation. + + This computes the frontier of the forward graph *before* which backprop + should stop. Operations in the returned set will not be differentiated. + This set is defined as the subset of `from_ops` containing ops that have + no predecessor in `from_ops`. 
`pending_count` is the result of + `_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops` + iff pending_count[op] > 0. + + In addition, none of `stop_gradient_ops` will be differentiated. + + Args: + from_ops: list of Operations. + stop_gradient_ops: list of Operations never to backprop through. + pending_count: mapping from operation to number of backprop inputs. + xs_set: ObjectIdentitySet of Tensors. + + Returns: + The set of operations. + """ + stop_ops = set() + for op in from_ops: + is_stop_op = True + for inp in _NonEagerInputs(op, xs_set): + if pending_count[inp.op] > 0: + is_stop_op = False + break + if is_stop_op: + stop_ops.add(op) + stop_ops.update(op for op in stop_gradient_ops) + return stop_ops + + +@contextlib.contextmanager +def _maybe_colocate_with( # pylint: disable=invalid-name + op: ops.Operation, + gradient_uid, + colocate_gradients_with_ops, +): + """Context to colocate with `op` if `colocate_gradients_with_ops`.""" + if colocate_gradients_with_ops: + with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access + yield + else: + yield + + +def _IsPartitionedCall(op: ops.Operation): + return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall" + + +def _SymGrad(op: ops.Operation, out_grads): + """Backprop through a function call node op given its outputs' gradients.""" + f_in = [x for x in op.inputs] + out_grads + f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs] + f = attr_value_pb2.NameAttrList() + if _IsPartitionedCall(op): + f.name = op.get_attr("f").name + else: + f.name = op.type + for k in op.node_def.attr: + f.attr[k].CopyFrom(op.node_def.attr[k]) + in_grads = gen_functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f) + return in_grads + + +def _MaybeCompile(scope, op: ops.Operation, func, grad_fn): + """Compile the calculation in grad_fn if op was marked as compiled.""" + scope = scope.rstrip("/").replace("/", "_") + if func is not None: + 
xla_compile = func.cached_definition.attr["_XlaCompile"].b + xla_separate_compiled_gradients = func.cached_definition.attr[ + "_XlaSeparateCompiledGradients"].b + xla_scope = func.cached_definition.attr["_XlaScope"].s.decode() + else: + try: + xla_compile = op.get_attr("_XlaCompile") + xla_separate_compiled_gradients = op.get_attr( + "_XlaSeparateCompiledGradients") + xla_scope = op.get_attr("_XlaScope").decode() + except ValueError: + xla_compile = False + + if not xla_compile: + return grad_fn() # Exit early + + # If the gradients are supposed to be compiled separately, we give them a + # _XlaScope name that is based on the name_scope of the gradients. Otherwise + # they just inherit the existing _XlaScope name, which lets them be merged + # together with the non-gradient computation. + if xla_separate_compiled_gradients: + xla_grad_scope = "%s_grad_%s" % (xla_scope, scope) + else: + xla_grad_scope = xla_scope + + attrs = { + "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile), + "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode()) + } + with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access + return grad_fn() + + +def _RaiseNoGradWrtInitialLoopValError( + op: ops.Operation, + from_ops: list[ops.Operation], + xs_set, +): + """Raises an error if we backprop through a loop var.""" + # Find the nearest 'to_op' reachable from 'op' to provide a more helpful error + # message. + target_op = None + queue = collections.deque([op]) + visited = set() + while queue: + curr_op = queue.popleft() + if curr_op in visited: continue + visited.add(curr_op) + if curr_op in from_ops: + target_op = curr_op + break + queue.extend(t.op for t in _NonEagerInputs(curr_op, xs_set)) + assert target_op + raise ValueError( + "Cannot compute gradient inside while loop with respect to op " + f"'{target_op.name}'. We do not support taking the gradient wrt or " + "through the initial value of a loop variable. 
Gradients can be computed " + "through loop invariants or wrt the input parameters to the loop body.") + + +def _IsFunction(graph): + # isinstance check for FuncGraphs that avoids the explicit dependency + # on func_graph.py and function.py + return isinstance(graph, ops.Graph) and graph._building_function # pylint: disable=protected-access + + +def _Captures(func_graph): + assert _IsFunction(func_graph) + return func_graph.captures + + +def _MaybeCaptured(t): + """If t is a captured value placeholder, returns the original captured value. + + Args: + t: Tensor + + Returns: + A tensor, potentially from a different Graph/FuncGraph. + """ + # pylint: disable=protected-access + if (not isinstance(t, ops.EagerTensor) and + _IsFunction(t.op.graph) and t.op.type == "Placeholder"): + for input_t, placeholder_t in _Captures(t.op.graph): + if t is placeholder_t: + return _MaybeCaptured(input_t) + # pylint: enable=protected-access + return t + + +def _NonEagerInputs(op: ops.Operation, xs_set): + """Returns the inputs of op, crossing closure boundaries where necessary. + + Does not return any captured EagerTensors, i.e., the number of tensors + returned may be less than the actual number of inputs. + + Args: + op: Operation + xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. + + Returns: + A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op + is in a FuncGraph and has captured inputs. + """ + return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)] + + +# TODO(skyewm): plumbing xs through everywhere is ugly, consider making +# _GradientsHelper a class with xs as a member variable. +def _Inputs(op: ops.Operation, xs_set): + """Returns the inputs of op, crossing closure boundaries where necessary. + + Args: + op: Operation + xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. + + Returns: + A list of tensors. 
The tensors may be from multiple Graph/FuncGraphs if op + is in a FuncGraph and has captured inputs. + """ + if _IsFunction(op.graph): # pylint: disable=protected-access + inputs = [] + for t in op.inputs: + # If we're differentiating w.r.t. `t`, do not attempt to traverse through + # it to a captured value. The algorithm needs to "see" `t` in this case, + # even if it's a function input for a captured value, whereas usually we'd + # like to traverse through these closures as if the captured value was the + # direct input to op. + if t not in xs_set: + t = _MaybeCaptured(t) + inputs.append(t) + return inputs + else: + return op.inputs + + +def _Consumers(t, func_graphs): + """Returns the consumers of t, crossing closure boundaries where necessary. + + Args: + t: Tensor + func_graphs: a list of FuncGraphs that may have captured t. + + Returns: + A list of tensors. The tensors will be from the current graph and/or + func_graphs. + """ + consumers = t.consumers() + for func in func_graphs: + for input_t, placeholder in _Captures(func): + if input_t is t: + consumers.extend(_Consumers(placeholder, func_graphs)) + return consumers + + +def _GradientsHelper(ys, + xs, + grad_ys=None, + name="gradients", + colocate_gradients_with_ops=False, + gate_gradients=False, + aggregation_method=None, + stop_gradients=None, + unconnected_gradients=UnconnectedGradients.NONE, + src_graph=None): + """Implementation of gradients().""" + if context.executing_eagerly(): + raise RuntimeError("tf.gradients is not supported when eager execution " + "is enabled. Use tf.GradientTape instead.") + ys = variable_utils.convert_variables_to_tensors(_AsList(ys)) + xs = [ + x.handle if resource_variable_ops.is_resource_variable(x) else x + for x in _AsList(xs) + ] + if grad_ys is not None: + grad_ys = _AsList(grad_ys) + + # Handle CompositeTensors. 
+ if (any(isinstance(x, composite_tensor.CompositeTensor) for x in xs) or + any(isinstance(y, composite_tensor.CompositeTensor) for y in ys)): + flat_xs = composite_tensor_gradient.get_flat_tensors_for_gradients(xs) + flat_ys = composite_tensor_gradient.get_flat_tensors_for_gradients(ys) + flat_grad_ys = ( + None if grad_ys is None else + composite_tensor_gradient.get_flat_tensors_for_gradients(grad_ys)) + flat_grads = _GradientsHelper(flat_ys, flat_xs, flat_grad_ys, name, + colocate_gradients_with_ops, gate_gradients, + aggregation_method, stop_gradients, + unconnected_gradients, src_graph) + return composite_tensor_gradient.replace_flat_tensors_for_gradients( + xs, flat_grads) + + if src_graph is None: + src_graph = ops.get_default_graph() + try: + unconnected_gradients = UnconnectedGradients(unconnected_gradients) + except ValueError: + raise ValueError( + f"Unknown value for unconnected_gradients: '{unconnected_gradients}'") + + # If src_graph is a _FuncGraph (i.e. a function body), gather it and all + # ancestor graphs. This is necessary for correctly handling captured values. + func_graphs = [] + curr_graph = src_graph + while _IsFunction(curr_graph): + func_graphs.append(curr_graph) + curr_graph = curr_graph.outer_graph + + stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients) + if grad_ys is None: + grad_ys = [None] * len(ys) + + with ops.name_scope( + name, "gradients", + list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope: + # Get a uid for this call to gradients that can be used to help + # cluster ops for compilation. 
+ gradient_uid = ops.get_default_graph().unique_name("uid") + ys = indexed_slices.convert_n_to_tensor_or_indexed_slices(ys, name="y") + xs = indexed_slices.internal_convert_n_to_tensor_or_indexed_slices( + xs, name="x", as_ref=True) + xs_set = object_identity.ObjectIdentitySet(xs) + grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, + gradient_uid) + + # The approach we take here is as follows: Create a list of all ops in the + # subgraph between the ys and xs. Visit these ops in reverse order of ids + # to ensure that when we visit an op the gradients w.r.t its outputs have + # been collected. Then aggregate these gradients if needed, call the op's + # gradient function, and add the generated gradients to the gradients for + # its input. + + # Initialize the pending count for ops in the connected subgraph from ys + # to the xs. + to_ops = [t.op for t in ys] + from_ops = [t.op for t in xs] + stop_gradient_ops = [t.op for t in stop_gradients] + reachable_to_ops, pending_count, loop_state = _PendingCount( + to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs_set) + + # Iterate over the collected ops. + # + # grads: op => list of gradients received on each output endpoint of the + # op. The gradients for each endpoint are initially collected as a list. + # When it is time to call the op's gradient function, for each endpoint we + # aggregate the list of received gradients into a Add() Operation if there + # is more than one. + grads = {} + + # Add the initial gradients for the ys. + for y, grad_y in zip(ys, grad_ys): + _SetGrad(grads, y, grad_y) + + # Initialize queue with to_ops. + queue = collections.deque() + # Add the ops in 'to_ops' into the queue. + to_ops_set = set() + for op in to_ops: + # 'ready' handles the case where one output gradient relies on + # another output's gradient. 
+ ready = (pending_count[op] == 0) + if ready and op not in to_ops_set and op in reachable_to_ops: + to_ops_set.add(op) + queue.append(op) + + if loop_state: + loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set) + for y in loop_exits: + if backprop_util.IsTrainable(y): + _SetGrad(grads, y, loop_state.ZerosLikeForExit(y)) + queue.append(y.op) + + stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set) + while queue: + # generate gradient subgraph for op. + op = queue.popleft() + with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): + if loop_state: + loop_state.EnterGradWhileContext(op, before=True) + out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state, + aggregation_method) + if loop_state: + loop_state.ExitGradWhileContext(op, before=True) + + grad_fn = None + func_call = None + is_partitioned_call = _IsPartitionedCall(op) + # pylint: disable=protected-access + is_func_call = src_graph._is_function(op.type) or is_partitioned_call + # pylint: enable=protected-access + has_out_grads = any( + isinstance(g, tensor_lib.Tensor) or g for g in out_grads + ) + if has_out_grads and (op not in stop_ops): + try: + grad_fn = ops.get_gradient_function(op) + except LookupError: + if is_func_call: + if is_partitioned_call: + func_name = compat.as_bytes(op.get_attr("f").name) + func_call = src_graph._get_function( # pylint: disable=protected-access + func_name) + # When a graph is imported, the FunctionDefs are not copied over + # to each sub-graph so we recursively search the outer graphs + # for the FunctionDef. 
+ if not func_call and hasattr(src_graph, "outer_graph"): + graph = src_graph.outer_graph + while graph is not None: + func_call = graph._get_function(func_name) # pylint: disable=protected-access + if func_call is not None: + break + if hasattr(graph, "outer_graph"): + graph = graph.outer_graph + else: + break + else: + func_call = src_graph._get_function(op.type) # pylint: disable=protected-access + # Note that __defun is not set if the graph is + # imported. If it's set, we prefer to access the original + # defun. + func_call = getattr(op, "__defun", func_call) + grad_fn = func_call.python_grad_func + else: + raise LookupError( + "No gradient defined for operation" + f"'{op.name}' (op type: {op.type}). " + "In general every operation must have an associated " + "`@tf.RegisterGradient` for correct autodiff, which this " + "op is lacking. If you want to pretend this " + "operation is a constant in your program, you may insert " + "`tf.stop_gradient`. This can be useful to silence the " + "error in cases where you know gradients are not needed, " + "e.g. the forward pass of tf.custom_gradient. " + "Please see more details in " + "https://www.tensorflow.org/api_docs/python/tf/custom_gradient.") # pylint: disable=line-too-long + if loop_state: + loop_state.EnterGradWhileContext(op, before=False) + + # NOTE(skyewm): We don't support computing gradients wrt a loop variable + # unless it's within the context of a single iteration (i.e. the + # gradient is wrt to the loop parameter in the body function, not wrt or + # through the initial value). This means if we're in a while loop + # context, we should never see a switch node from this context. 
+ # pylint: disable=protected-access + if (control_flow_util.IsSwitch(op) and + op._control_flow_context is not None and + op._control_flow_context.IsWhileContext() and + op._control_flow_context == + ops.get_default_graph()._get_control_flow_context()): + _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set) + # pylint: enable=protected-access + + if (grad_fn or is_func_call) and has_out_grads: + # NOTE: If _AggregatedGrads didn't compute a value for the i'th + # output, it means that the cost does not depend on output[i], + # therefore dC/doutput[i] is 0. + for i, out_grad in enumerate(out_grads): + if ( + not isinstance(out_grad, tensor_lib.Tensor) and not out_grad + ) and ( + (not grad_fn and is_func_call) + or backprop_util.IsTrainable(op.outputs[i]) + ): + # Only trainable outputs or outputs for a function call that + # will use SymbolicGradient get a zero gradient. Gradient + # functions should ignore the gradient for other outputs. + # TODO(apassos) gradients of resource handles might be an + # issue here because of zeros. + if loop_state: + out_grads[i] = loop_state.ZerosLikeV1WhileLoop(op, i) + elif default_gradient.supports_default_grad(op.outputs[i]): + # TODO(b/143286622): The supports_default_grad check is needed + # because While op emits non-differentiable resource tensors + # as outputs. Remove this check when that is not the case. + out_grads[i] = control_flow_state.ZerosLike(op, i) + with ops.name_scope(op.name + "_grad"): + # pylint: disable=protected-access + with src_graph._original_op(op): + # pylint: enable=protected-access + if grad_fn: + # If grad_fn was found, do not use SymbolicGradient even for + # functions. + in_grads = _MaybeCompile(grad_scope, op, func_call, + lambda: grad_fn(op, *out_grads)) + else: + # For function call ops, we add a 'SymbolicGradient' + # node to the graph to compute gradients. 
+ in_grads = _MaybeCompile(grad_scope, op, func_call, + lambda: _SymGrad(op, out_grads)) + in_grads = _AsList(in_grads) + _VerifyGeneratedGradients(in_grads, op) + if gate_gradients and len([x for x in in_grads + if x is not None]) > 1: + with ops.device(None): + with ops._colocate_with_for_gradient( # pylint: disable=protected-access + None, + gradient_uid, + ignore_existing=True): + in_grads = control_flow_ops.tuple(in_grads) + _LogOpGradients(op, out_grads, in_grads) + else: + # If no grad_fn is defined or none of out_grads is available, + # just propagate a list of None backwards. + in_grads = [None] * len(_Inputs(op, xs_set)) + # Note: we don't filter out eager inputs here because the inputs need to + # line up with in_grads. + for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs_set), in_grads)): + if in_grad is not None: + if (isinstance(in_grad, tensor_lib.Tensor) and + t_in.dtype != dtypes.resource): + try: + in_grad.set_shape(t_in.get_shape()) + except ValueError: + raise ValueError( + "Incompatible shapes between op input and calculated " + f"input gradient. Forward operation: {op.name}. Input " + f"index: {i}. Original input shape: {t_in.shape}. " + f"Calculated input gradient shape: {in_grad.shape}") + if not isinstance(t_in, ops.EagerTensor): + _SetGrad(grads, t_in, in_grad) + if loop_state: + loop_state.ExitGradWhileContext(op, before=False) + + # Update pending count for the inputs of op and enqueue ready ops. 
+ _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state, + xs_set) + + if loop_state: + loop_state.PostProcessing() + return [_GetGrad(grads, x, unconnected_gradients) for x in xs] + + +def _HasAnyNotNoneGrads(grads, op: ops.Operation): + """Return true iff op has real gradient.""" + out_grads = _GetGrads(grads, op) + for out_grad in out_grads: + if isinstance(out_grad, (tensor_lib.Tensor, indexed_slices.IndexedSlices)): + return True + if out_grad and isinstance(out_grad, collections_abc.Sequence): + if any(g is not None for g in out_grad): + return True + return False + + +def _UpdatePendingAndEnqueueReady( + grads, op: ops.Operation, queue, pending_count, loop_state, xs_set +): + """Update pending count for the inputs of op and enqueue ready ops.""" + for x in _NonEagerInputs(op, xs_set): + pending_count[x.op] -= 1 + ready = pending_count[x.op] == 0 + if loop_state and not ready: + ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op) + if ready: + if control_flow_util.IsLoopExit(x.op): + # if x is an exit without real gradient, defer processing them. + grad_state = loop_state.GetGradState(x.op, before=False) + grad_state.deferred_exits.append(x) + grad_state.pending_exits_count -= 1 + if grad_state.pending_exits_count == 0: + # We now have all the exits so process them. + has_not_none_grad = False + for y in grad_state.deferred_exits: + if _HasAnyNotNoneGrads(grads, y.op): + has_not_none_grad = True + queue.append(y.op) + else: + grad_state.unused_exits.append(y) + if has_not_none_grad: + # For an unused exit, if it has trainable outputs, backprop + # a zero gradient. Otherwise, just ignore it. + for y in grad_state.unused_exits: + if backprop_util.IsTrainable(y): + _SetGrad(grads, y, loop_state.ZerosLikeForExit(y)) + queue.append(y.op) + else: + # All exits are "unused" so use None as gradient. 
+ for y in grad_state.unused_exits: + queue.append(y.op) + else: + queue.append(x.op) + + +def _SetGrad(grads, t, grad): + """Sets gradient "grad" in "grads" for tensor "t".""" + op = t.op + op_grads = grads.get(op) + if not op_grads: + op_grads = [[] for _ in range(len(op.outputs))] + grads[op] = op_grads + t_grads = op_grads[t.value_index] + if isinstance(t_grads, list): + t_grads.append(grad) + else: + assert control_flow_util.IsLoopSwitch(op) + op_grads[t.value_index] = grad + + +def _ZerosLike(t): + t_dtype = default_gradient.get_zeros_dtype(t) + if t.dtype == dtypes.resource: + return array_ops.zeros( + resource_variable_ops.variable_shape(t), dtype=t_dtype) + else: + return array_ops.zeros_like(t, dtype=t_dtype) + + +def _GetGrad(grads, t, unconnected_gradients): + """Gets gradient for tensor "t".""" + op = t.op + op_grads = grads.get(op) + if not op_grads: + if unconnected_gradients == UnconnectedGradients.ZERO: + return _ZerosLike(t) + elif unconnected_gradients == UnconnectedGradients.NONE: + return None + else: + raise ValueError( + f"Unknown value for unconnected_gradients: '{unconnected_gradients}'") + + t_grad = op_grads[t.value_index] + # This can happen if some other output of `t.op` has non-None grad. 
+ if unconnected_gradients == UnconnectedGradients.ZERO and t_grad is None: + return _ZerosLike(t) + + assert not isinstance( + t_grad, list), ("gradients list should have been aggregated by now.") + return t_grad + + +def _GetGrads(grads, op: ops.Operation): + """Gets all gradients for op.""" + if op in grads: + return grads[op] + else: + return [[] for _ in range(len(op.outputs))] + + +def _AccumulatorShape(inputs): + shape = tensor_shape.unknown_shape() + for i in inputs: + if isinstance(i, tensor_lib.Tensor): + shape = shape.merge_with(i.get_shape()) + return shape + + +def _LogOpGradients(op: ops.Operation, out_grads, in_grads): + """Log the in and out grads of an op.""" + logging.vlog(1, "Gradient for '" + op.name + "'") + + def _FilterGrad(x): + if x is None: + return False + if isinstance(x, (list, tuple)): + return bool(x) + else: + return True + + logging.vlog(1, " in --> %s", + ", ".join(x.name for x in out_grads if _FilterGrad(x))) + logging.vlog(1, " out --> %s", + ", ".join(x.name for x in in_grads if _FilterGrad(x))) + + +def _MultiDeviceAddN(tensor_list, gradient_uid): + """Adds tensors from potentially multiple devices.""" + # Basic function structure comes from control_flow_ops.group(). + # Sort tensors according to their devices. + tensors_on_device = collections.defaultdict(lambda: []) + for tensor in tensor_list: + tensors_on_device[tensor.device].append(tensor) + + # For each device, add the tensors on that device first. + # Then gather the partial sums from multiple devices. + # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion. + # E.g., aggregate per GPU, then per task, and so on. 
+ summands = [] + + def DeviceKey(dev): + return "" if dev is None else dev + + for dev in sorted(tensors_on_device, key=DeviceKey): + tensors = tensors_on_device[dev] + with ops._colocate_with_for_gradient( # pylint: disable=protected-access + tensors[0].op, + gradient_uid, + ignore_existing=True): + summands.append(math_ops.add_n(tensors)) + + return math_ops.add_n(summands) + + +@tf_export("AggregationMethod") +class AggregationMethod: + """A class listing aggregation methods used to combine gradients. + + Computing partial derivatives can require aggregating gradient + contributions. This class lists the various methods that can + be used to combine gradients in the graph. + + The following aggregation methods are part of the stable API for + aggregating gradients: + + * `ADD_N`: All of the gradient terms are summed as part of one + operation using the "AddN" op (see `tf.add_n`). This + method has the property that all gradients must be ready and + buffered separately in memory before any aggregation is performed. + * `DEFAULT`: The system-chosen default aggregation method. + + The following aggregation methods are experimental and may not + be supported in future releases: + + * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using + the "AddN" op. This method of summing gradients may reduce + performance, but it can improve memory utilization because the + gradients can be released earlier. + * `EXPERIMENTAL_ACCUMULATE_N`: Same as `EXPERIMENTAL_TREE`. + + Example usage when computing gradient: + + >>> @tf.function + ... def example(): + ... x = tf.constant(1.0) + ... y = x * 2.0 + ... z = y + y + y + y + ... return tf.gradients(z, [x, y], + ... aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + >>> example() + [, + ] + + """ + ADD_N = 0 + DEFAULT = ADD_N + # The following are experimental and may not be supported in future releases. 
+ EXPERIMENTAL_TREE = 1 + EXPERIMENTAL_ACCUMULATE_N = 2 # An alias for EXPERIMENTAL_ADD_N = 1 + + +def _AggregatedGrads(grads, + op, + gradient_uid, + loop_state, + aggregation_method=None): + """Get the aggregated gradients for op. + + Args: + grads: The map of memoized gradients. + op: The op to get gradients for. + gradient_uid: A unique identifier within the graph indicating + which invocation of gradients is being executed. Used to cluster + ops for compilation. + loop_state: An object for maintaining the state of the while loops in the + graph. It is of type ControlFlowState. None if the graph + contains no while loops. + aggregation_method: Specifies the method used to combine gradient terms. + Accepted values are constants defined in the class `AggregationMethod`. + + Returns: + A list of gradients, one per each output of `op`. If the gradients + for a particular output is a list, this function aggregates it + before returning. + + Raises: + TypeError: if the incoming grads are not Tensors or IndexedSlices. + ValueError: if the arguments are invalid. + + """ + if aggregation_method is None: + aggregation_method = AggregationMethod.DEFAULT + valid_aggregation_methods = [ + AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE, + AggregationMethod.EXPERIMENTAL_ACCUMULATE_N] + if aggregation_method not in valid_aggregation_methods: + raise ValueError( + f"Invalid `aggregation_method` specified {aggregation_method}. 
" + f"Accepted values are {valid_aggregation_methods}.") + out_grads = _GetGrads(grads, op) + for i, out_grad in enumerate(out_grads): + if loop_state: + if isinstance( + out_grad, (tensor_lib.Tensor, indexed_slices.IndexedSlices)): + assert control_flow_util.IsLoopSwitch(op) + continue + # Grads have to be Tensors or IndexedSlices + if (isinstance(out_grad, collections_abc.Sequence) and not all( + isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) + for g in out_grad + if g is not None)): + raise TypeError(f"Invalid gradient {out_grad} [index = {i}]. Gradients " + "have to be either all Tensors or all IndexedSlices") + # Aggregate multiple gradients, and convert [] to None. + if out_grad: + if len(out_grad) < 2: + used = "nop" + out_grads[i] = out_grad[0] + elif all( + isinstance(g, tensor_lib.Tensor) for g in out_grad if g is not None): + tensor_shape = _AccumulatorShape(out_grad) + if aggregation_method in [ + AggregationMethod.EXPERIMENTAL_TREE, + AggregationMethod.EXPERIMENTAL_ACCUMULATE_N + ]: + # Aggregate all gradients by doing pairwise sums: this may + # reduce performance, but it can improve memory because the + # gradients can be released earlier. + # + # TODO(vrv): Consider replacing this with a version of + # tf.AddN() that eagerly frees its inputs as soon as they are + # ready, so the order of this tree does not become a problem. + used = "tree" + with ops.name_scope(op.name + "_gradient_sum"): + running_sum = out_grad[0] + for grad in out_grad[1:]: + running_sum = math_ops.add_n([running_sum, grad]) + out_grads[i] = running_sum + else: + used = "add_n" + out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid) + logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad), + tensor_shape, used) + else: + out_grads[i] = backprop_util.AggregateIndexedSlicesGradients(out_grad) # pylint: disable=protected-access + else: # not out_grad + # out_grads[i] is [], thus its aggregation is simply None. 
+ out_grads[i] = None + return out_grads + + +# Represents the output of TFE_Py_TapeSetPossibleGradientTypes. Real enums are +# unfortunately too slow to use here. +POSSIBLE_GRADIENT_TYPES_NONE = 0 +POSSIBLE_GRADIENT_TYPES_FIRST_ORDER = 1 +POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER = 2 + + +def PossibleTapeGradientTypes(tensors): + """Determines whether and how `args` may require tape gradients.""" + return pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes(tensors) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/image_grad_test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/image_grad_test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..fc84f3054f96560d997cae20778517650f42e70e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/image_grad_test_base.py @@ -0,0 +1,648 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for Python ops defined in image_grad.py.""" + +from absl.testing import parameterized +import numpy as np + +from tensorflow.python.eager import backprop +from tensorflow.python.eager import context +from tensorflow.python.framework import config +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl +from tensorflow.python.framework import test_util +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import gen_image_ops +from tensorflow.python.ops import gradient_checker_v2 +from tensorflow.python.ops import image_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import test + + +@test_util.for_all_test_methods(test_util.disable_xla, + 'align_corners=False not supported by XLA') +class ResizeNearestNeighborOpTestBase(test.TestCase): + + TYPES = [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype] + + def testShapeIsCorrectAfterOp(self): + in_shape = [1, 2, 2, 1] + out_shape = [1, 4, 6, 1] + + for nptype in self.TYPES: + x = np.arange(0, 4).reshape(in_shape).astype(nptype) + + input_tensor = constant_op.constant(x, shape=in_shape) + resize_out = image_ops.resize_nearest_neighbor(input_tensor, + out_shape[1:3]) + with self.cached_session(): + self.assertEqual(out_shape, list(resize_out.get_shape())) + resize_out = self.evaluate(resize_out) + self.assertEqual(out_shape, list(resize_out.shape)) + + def testGradFromResizeToLargerInBothDims(self): + in_shape = [1, 2, 3, 1] + out_shape = (1, 4, 6, 1) + + for nptype in self.TYPES: + x = np.arange(0, 6).reshape(in_shape).astype(nptype) + + def resize_nn(t, shape=out_shape): + return image_ops.resize_nearest_neighbor(t, shape[1:3]) + + with self.cached_session(): + input_tensor = constant_op.constant(x, shape=in_shape) + err = gradient_checker_v2.max_error( + 
*gradient_checker_v2.compute_gradient( + resize_nn, [input_tensor], delta=1 / 8)) + self.assertLess(err, 1e-3) + + def testGradFromResizeToSmallerInBothDims(self): + in_shape = [1, 4, 6, 1] + out_shape = (1, 2, 3, 1) + + for nptype in self.TYPES: + x = np.arange(0, 24).reshape(in_shape).astype(nptype) + + def resize_nn(t, shape=out_shape): + return image_ops.resize_nearest_neighbor(t, shape[1:3]) + + with self.cached_session(): + input_tensor = constant_op.constant(x, shape=in_shape) + err = gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient( + resize_nn, [input_tensor], delta=1 / 8)) + self.assertLess(err, 1e-3) + + def testCompareGpuVsCpu(self): + in_shape = [1, 4, 6, 3] + out_shape = (1, 8, 16, 3) + + for nptype in self.TYPES: + x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype) + for align_corners in [True, False]: + + def resize_nn(t, shape=out_shape, align_corners=align_corners): + return image_ops.resize_nearest_neighbor( + t, shape[1:3], align_corners=align_corners) + + with self.cached_session(use_gpu=False): + input_tensor = constant_op.constant(x, shape=in_shape) + grad_cpu = gradient_checker_v2.compute_gradient( + resize_nn, [input_tensor], delta=1 / 8) + + with self.cached_session(): + input_tensor = constant_op.constant(x, shape=in_shape) + grad_gpu = gradient_checker_v2.compute_gradient( + resize_nn, [input_tensor], delta=1 / 8) + + self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5) + + +class ResizeBilinearOpTestBase(test.TestCase, parameterized.TestCase): + + def _itGen(self, smaller_shape, larger_shape): + up_sample = (smaller_shape, larger_shape) + down_sample = (larger_shape, smaller_shape) + pass_through = (larger_shape, larger_shape) + shape_pairs = (up_sample, down_sample, pass_through) + # Align corners is deprecated in TF2.0, but align_corners==False is not + # supported by XLA. 
+ options = [(True, False)] + if not test_util.is_xla_enabled(): + options += [(False, True), (False, False)] + for align_corners, half_pixel_centers in options: + for in_shape, out_shape in shape_pairs: + yield in_shape, out_shape, align_corners, half_pixel_centers + + def _getJacobians(self, + in_shape, + out_shape, + align_corners=False, + half_pixel_centers=False, + dtype=np.float32, + use_gpu=False, + force_gpu=False): + with self.cached_session(use_gpu=use_gpu, force_gpu=force_gpu): + # Input values should not influence gradients + x = np.arange(np.prod(in_shape)).reshape(in_shape).astype(dtype) + input_tensor = constant_op.constant(x, shape=in_shape) + + def func(in_tensor): + return image_ops.resize_bilinear( + in_tensor, + out_shape[1:3], + align_corners=align_corners, + half_pixel_centers=half_pixel_centers) + + return gradient_checker_v2.compute_gradient(func, [input_tensor]) + + @parameterized.parameters(set((True, context.executing_eagerly()))) + def _testShapesParameterized(self, use_tape): + + TEST_CASES = [[1, 1], [2, 3], [5, 4]] # pylint: disable=invalid-name + + for batch_size, channel_count in TEST_CASES: + smaller_shape = [batch_size, 2, 3, channel_count] + larger_shape = [batch_size, 4, 6, channel_count] + for in_shape, out_shape, _, _ in self._itGen(smaller_shape, larger_shape): + with test_util.AbstractGradientTape(use_tape=use_tape) as tape: + # Input values should not influence shapes + x = np.arange(np.prod(in_shape)).reshape(in_shape).astype(np.float32) + input_tensor = constant_op.constant(x, shape=in_shape) + tape.watch(input_tensor) + resized_tensor = image_ops.resize_bilinear(input_tensor, + out_shape[1:3]) + self.assertEqual(out_shape, list(resized_tensor.get_shape())) + + grad_tensor = tape.gradient(resized_tensor, input_tensor) + self.assertEqual(in_shape, list(grad_tensor.get_shape())) + with self.cached_session(): + resized_values = self.evaluate(resized_tensor) + self.assertEqual(out_shape, list(resized_values.shape)) + 
grad_values = self.evaluate(grad_tensor) + self.assertEqual(in_shape, list(grad_values.shape)) + + @parameterized.parameters({ + 'batch_size': 1, + 'channel_count': 1 + }, { + 'batch_size': 4, + 'channel_count': 3 + }, { + 'batch_size': 3, + 'channel_count': 2 + }) + def testGradients(self, batch_size, channel_count): + smaller_shape = [batch_size, 2, 3, channel_count] + larger_shape = [batch_size, 5, 6, channel_count] + for in_shape, out_shape, align_corners, half_pixel_centers in \ + self._itGen(smaller_shape, larger_shape): + jacob_a, jacob_n = self._getJacobians(in_shape, out_shape, align_corners, + half_pixel_centers) + threshold = 5e-3 + self.assertAllClose(jacob_a, jacob_n, threshold, threshold) + + def testTypes(self): + in_shape = [1, 4, 6, 1] + out_shape = [1, 2, 3, 1] + for use_gpu in [False, True]: + for dtype in [ + np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype + ]: + jacob_a, jacob_n = self._getJacobians( + in_shape, out_shape, dtype=dtype, use_gpu=use_gpu) + if dtype in (np.float16, dtypes.bfloat16.as_numpy_dtype): + # Compare fp16/bf16 analytical gradients to fp32 numerical gradients, + # since fp16/bf16 numerical gradients are too imprecise unless great + # care is taken with choosing the inputs and the delta. This is + # a weaker, but pragmatic, check (in particular, it does not test + # the op itself, only its gradient). 
+ _, jacob_n = self._getJacobians( + in_shape, out_shape, dtype=np.float32, use_gpu=use_gpu) + threshold = 1e-3 + if dtype == np.float64: + threshold = 1e-5 + self.assertAllClose(jacob_a, jacob_n, threshold, threshold) + + @parameterized.parameters(set((True, context.executing_eagerly()))) + def testGradOnUnsupportedType(self, use_tape): + in_shape = [1, 4, 6, 1] + out_shape = [1, 2, 3, 1] + + with test_util.AbstractGradientTape(use_tape=use_tape) as tape: + x = np.arange(0, 24).reshape(in_shape).astype(np.uint8) + input_tensor = constant_op.constant(x, shape=in_shape) + tape.watch(input_tensor) + resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3]) + with self.cached_session(): + grad = tape.gradient(resize_out, [input_tensor]) + self.assertEqual([None], grad) + + def _gpuVsCpuCase(self, in_shape, out_shape, align_corners, + half_pixel_centers, dtype): + grad = {} + for use_gpu in [False, True]: + grad[use_gpu] = self._getJacobians( + in_shape, + out_shape, + align_corners, + half_pixel_centers, + dtype=dtype, + use_gpu=use_gpu) + threshold = 1e-4 + # Note that this is comparing both analytical and numerical Jacobians + self.assertAllClose(grad[False], grad[True], rtol=threshold, atol=threshold) + + @parameterized.parameters({ + 'batch_size': 1, + 'channel_count': 1 + }, { + 'batch_size': 2, + 'channel_count': 3 + }, { + 'batch_size': 5, + 'channel_count': 4 + }) + def testCompareGpuVsCpu(self, batch_size, channel_count): + smaller_shape = [batch_size, 4, 6, channel_count] + larger_shape = [batch_size, 8, 16, channel_count] + for params in self._itGen(smaller_shape, larger_shape): + self._gpuVsCpuCase(*params, dtype=np.float32) + + def testCompareGpuVsCpuFloat64(self): + in_shape = [1, 5, 7, 1] + out_shape = [1, 9, 11, 1] + # Note that there is no 16-bit floating-point format registered for GPU + self._gpuVsCpuCase( + in_shape, + out_shape, + align_corners=True, + half_pixel_centers=False, + dtype=np.float64) + + +class 
ResizeBicubicOpTestBase(test.TestCase, parameterized.TestCase): + """Tests resize bicubic ops.""" + + def testShapeIsCorrectAfterOp(self): + in_shape = [1, 2, 2, 1] + out_shape = [1, 4, 6, 1] + + x = np.arange(0, 4).reshape(in_shape).astype(np.float32) + + for align_corners in [True, False]: + input_tensor = constant_op.constant(x, shape=in_shape) + resize_out = image_ops.resize_bicubic( + input_tensor, out_shape[1:3], align_corners=align_corners) + with self.cached_session(): + self.assertEqual(out_shape, list(resize_out.get_shape())) + resize_out = self.evaluate(resize_out) + self.assertEqual(out_shape, list(resize_out.shape)) + + def testGradFromResizeToLargerInBothDims(self): + in_shape = [1, 2, 3, 1] + out_shape = [1, 4, 6, 1] + + x = np.arange(0, 6).reshape(in_shape).astype(np.float32) + input_tensor = constant_op.constant(x, shape=in_shape) + + for align_corners in [True, False]: + + def func(input_tensor, align_corners=align_corners): + return image_ops.resize_bicubic( + input_tensor, out_shape[1:3], align_corners=align_corners) + + with self.cached_session(): + err = gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient(func, [input_tensor])) + + self.assertLess(err, 1e-3) + + def testGradFromResizeToSmallerInBothDims(self): + in_shape = [1, 4, 6, 1] + out_shape = [1, 2, 3, 1] + + x = np.arange(0, 24).reshape(in_shape).astype(np.float32) + input_tensor = constant_op.constant(x, shape=in_shape) + + for align_corners in [True, False]: + + def func(input_tensor, align_corners=align_corners): + return image_ops.resize_bicubic( + input_tensor, out_shape[1:3], align_corners=align_corners) + + with self.cached_session(): + err = gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient(func, [input_tensor])) + + self.assertLess(err, 1e-3) + + @parameterized.parameters(set((True, context.executing_eagerly()))) + def testGradOnUnsupportedType(self, use_tape): + with test_util.AbstractGradientTape(use_tape=use_tape) as tape: + in_shape 
= [1, 4, 6, 1] + out_shape = [1, 2, 3, 1] + + x = np.arange(0, 24).reshape(in_shape).astype(np.uint8) + input_tensor = constant_op.constant(x, shape=in_shape) + tape.watch(input_tensor) + + resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3]) + with self.cached_session(): + grad = tape.gradient(resize_out, [input_tensor]) + self.assertEqual([None], grad) + + +class ScaleAndTranslateOpTestBase(test.TestCase): + """Tests scale and translate op.""" + + def testGrads(self): + in_shape = [1, 2, 3, 1] + out_shape = [1, 4, 6, 1] + + x = np.arange(0, 6).reshape(in_shape).astype(np.float32) + + kernel_types = [ + 'lanczos1', 'lanczos3', 'lanczos5', 'gaussian', 'box', 'triangle', + 'keyscubic', 'mitchellcubic' + ] + scales = [(1.0, 1.0), (0.37, 0.47), (2.1, 2.1)] + translations = [(0.0, 0.0), (3.14, 1.19), (2.1, 3.1), (100.0, 200.0)] + for scale in scales: + for translation in translations: + for kernel_type in kernel_types: + for antialias in [True, False]: + with self.cached_session(): + input_tensor = constant_op.constant(x, shape=in_shape) + + def scale_trans(input_tensor, + scale=scale, + translation=translation, + kernel_type=kernel_type, + antialias=antialias): + # pylint: disable=cell-var-from-loop + return image_ops.scale_and_translate( + input_tensor, + out_shape[1:3], + scale=constant_op.constant(scale), + translation=constant_op.constant(translation), + kernel_type=kernel_type, + antialias=antialias) + + err = gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient(scale_trans, + [input_tensor])) + + self.assertLess(err, 1e-3) + + def testIdentityGrads(self): + """Tests that Gradients for 1.0 scale should be ones for some kernels.""" + in_shape = [1, 2, 3, 1] + out_shape = [1, 4, 6, 1] + + x = np.arange(0, 6).reshape(in_shape).astype(np.float32) + + kernel_types = ['lanczos1', 'lanczos3', 'lanczos5', 'triangle', 'keyscubic'] + scale = (1.0, 1.0) + translation = (0.0, 0.0) + antialias = True + for kernel_type in kernel_types: + with 
self.cached_session(): + input_tensor = constant_op.constant(x, shape=in_shape) + with backprop.GradientTape() as tape: + tape.watch(input_tensor) + scale_and_translate_out = image_ops.scale_and_translate( + input_tensor, + out_shape[1:3], + scale=constant_op.constant(scale), + translation=constant_op.constant(translation), + kernel_type=kernel_type, + antialias=antialias) + grad = tape.gradient(scale_and_translate_out, input_tensor)[0] + grad_v = self.evaluate(grad) + self.assertAllClose(np.ones_like(grad_v), grad_v) + + +class CropAndResizeOpTestBase(test.TestCase): + + def testShapeIsCorrectAfterOp(self): + batch = 2 + image_height = 3 + image_width = 4 + crop_height = 4 + crop_width = 5 + depth = 2 + num_boxes = 2 + + image_shape = [batch, image_height, image_width, depth] + crop_size = [crop_height, crop_width] + crops_shape = [num_boxes, crop_height, crop_width, depth] + + image = np.arange(0, batch * image_height * image_width * + depth).reshape(image_shape).astype(np.float32) + boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32) + box_ind = np.array([0, 1], dtype=np.int32) + + crops = image_ops.crop_and_resize( + constant_op.constant(image, shape=image_shape), + constant_op.constant(boxes, shape=[num_boxes, 4]), + constant_op.constant(box_ind, shape=[num_boxes]), + constant_op.constant(crop_size, shape=[2])) + with self.session(): + self.assertEqual(crops_shape, list(crops.get_shape())) + crops = self.evaluate(crops) + self.assertEqual(crops_shape, list(crops.shape)) + + def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples): + """Generate samples that are far enough from a set of anchor points. + + We generate uniform samples in [low, high], then reject those that are less + than radius away from any point in anchors. We stop after we have accepted + num_samples samples. + + Args: + low: The lower end of the interval. + high: The upper end of the interval. 
+ anchors: A list of length num_crops with anchor points to avoid. + radius: Distance threshold for the samples from the anchors. + num_samples: How many samples to produce. + + Returns: + samples: A list of length num_samples with the accepted samples. + """ + self.assertTrue(low < high) + self.assertTrue(radius >= 0) + num_anchors = len(anchors) + # Make sure that at least half of the interval is not forbidden. + self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low)) + anchors = np.reshape(anchors, num_anchors) + samples = [] + while len(samples) < num_samples: + sample = np.random.uniform(low, high) + if np.all(np.fabs(sample - anchors) > radius): + samples.append(sample) + return samples + + def testGradRandomBoxes(self): + """Test that the gradient is correct for randomly generated boxes. + + The mapping is piecewise differentiable with respect to the box coordinates. + The points where the function is not differentiable are those which are + mapped to image pixels, i.e., the normalized y coordinates in + np.linspace(0, 1, image_height) and normalized x coordinates in + np.linspace(0, 1, image_width). Make sure that the box coordinates are + sufficiently far away from those rectangular grid centers that are points of + discontinuity, so that the finite difference Jacobian is close to the + computed one. + """ + np.random.seed(1) # Make it reproducible. + delta = 1e-3 + radius = 2 * delta + low, high = -0.5, 1.5 # Also covers the case of extrapolation. 
+ + image_height = 4 + for image_width in range(1, 3): + for crop_height in range(1, 3): + for crop_width in range(2, 4): + for depth in range(1, 3): + for num_boxes in range(1, 3): + + batch = num_boxes + image_shape = [batch, image_height, image_width, depth] + crop_size = [crop_height, crop_width] + + image = np.arange(0, batch * image_height * image_width * + depth).reshape(image_shape).astype(np.float32) + boxes = [] + for _ in range(num_boxes): + # pylint: disable=unbalanced-tuple-unpacking + y1, y2 = self._randomUniformAvoidAnchors( + low, high, np.linspace(0, 1, image_height), radius, 2) + x1, x2 = self._randomUniformAvoidAnchors( + low, high, np.linspace(0, 1, image_width), radius, 2) + # pylint: enable=unbalanced-tuple-unpacking + boxes.append([y1, x1, y2, x2]) + + boxes = np.array(boxes, dtype=np.float32) + box_ind = np.arange(batch, dtype=np.int32) + + image_tensor = constant_op.constant(image, shape=image_shape) + boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4]) + box_ind_tensor = constant_op.constant(box_ind, shape=[num_boxes]) + + def crop_resize(image_tensor, boxes_tensor): + # pylint: disable=cell-var-from-loop + return image_ops.crop_and_resize( + image_tensor, boxes_tensor, box_ind_tensor, + constant_op.constant(crop_size, shape=[2])) + + with test_util.device(use_gpu=True): + with self.cached_session(): + # pylint: disable=cell-var-from-loop + if (config.is_op_determinism_enabled() and + test_util.is_gpu_available()): + with self.assertRaises(errors_impl.UnimplementedError): + gradient_checker_v2.compute_gradient( + lambda x: crop_resize(x, boxes_tensor), + [image_tensor]) + with self.assertRaises(errors_impl.UnimplementedError): + gradient_checker_v2.compute_gradient( + lambda x: crop_resize(image_tensor, x), + [boxes_tensor]) + else: + err1 = gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient( + lambda x: crop_resize(x, boxes_tensor), + [image_tensor])) + err2 = gradient_checker_v2.max_error( + 
*gradient_checker_v2.compute_gradient( + lambda x: crop_resize(image_tensor, x), + [boxes_tensor])) + err = max(err1, err2) + self.assertLess(err, 2e-3) + + +@test_util.run_all_in_graph_and_eager_modes +class RGBToHSVOpTestBase(test.TestCase): + + TYPES = [np.float32, np.float64] + + def testShapeIsCorrectAfterOp(self): + in_shape = [2, 20, 30, 3] + out_shape = [2, 20, 30, 3] + + for nptype in self.TYPES: + x = np.random.randint(0, high=255, size=[2, 20, 30, 3]).astype(nptype) + rgb_input_tensor = constant_op.constant(x, shape=in_shape) + hsv_out = gen_image_ops.rgb_to_hsv(rgb_input_tensor) + with self.cached_session(): + self.assertEqual(out_shape, list(hsv_out.get_shape())) + hsv_out = self.evaluate(hsv_out) + self.assertEqual(out_shape, list(hsv_out.shape)) + + def testRGBToHSVGradSimpleCase(self): + + def f(x): + return gen_image_ops.rgb_to_hsv(x) + + for nptype in self.TYPES: + # Building a simple input tensor to avoid any discontinuity + x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, + 0.9]]).astype(nptype) + rgb_input_tensor = constant_op.constant(x, shape=x.shape) + # Computing Analytical and Numerical gradients of f(x) + analytical, numerical = gradient_checker_v2.compute_gradient( + f, [rgb_input_tensor]) + self.assertAllClose(numerical, analytical, atol=1e-4) + + def testRGBToHSVGradRandomCase(self): + + def f(x): + return gen_image_ops.rgb_to_hsv(x) + + np.random.seed(0) + # Building a simple input tensor to avoid any discontinuity + x = np.random.rand(1, 5, 5, 3).astype(np.float32) + rgb_input_tensor = constant_op.constant(x, shape=x.shape) + # Computing Analytical and Numerical gradients of f(x) + self.assertLess( + gradient_checker_v2.max_error( + *gradient_checker_v2.compute_gradient(f, [rgb_input_tensor])), 1e-4) + + def testRGBToHSVGradSpecialCaseRGreatest(self): + # This test tests a specific subset of the input space + # with a dummy function implemented with native TF operations. 
+ in_shape = [2, 10, 20, 3] + + def f(x): + return gen_image_ops.rgb_to_hsv(x) + + def f_dummy(x): + # This dummy function is a implementation of RGB to HSV using + # primitive TF functions for one particular case when R>G>B. + r = x[..., 0] + g = x[..., 1] + b = x[..., 2] + # Since MAX = r and MIN = b, we get the following h,s,v values. + v = r + s = 1 - math_ops.div_no_nan(b, r) + h = 60 * math_ops.div_no_nan(g - b, r - b) + h = h / 360 + return array_ops_stack.stack([h, s, v], axis=-1) + + # Building a custom input tensor where R>G>B + x_reds = np.ones((in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) + x_greens = 0.5 * np.ones( + (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) + x_blues = 0.2 * np.ones( + (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32) + x = np.stack([x_reds, x_greens, x_blues], axis=-1) + rgb_input_tensor = constant_op.constant(x, shape=in_shape) + + # Computing Analytical and Numerical gradients of f(x) + analytical, numerical = gradient_checker_v2.compute_gradient( + f, [rgb_input_tensor]) + # Computing Analytical and Numerical gradients of f_dummy(x) + analytical_dummy, numerical_dummy = gradient_checker_v2.compute_gradient( + f_dummy, [rgb_input_tensor]) + self.assertAllClose(numerical, analytical, atol=1e-4) + self.assertAllClose(analytical_dummy, analytical, atol=1e-4) + self.assertAllClose(numerical_dummy, numerical, atol=1e-4) + + +if __name__ == '__main__': + test.main() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_grad.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..10daa688d49ce268030212f756f1d42ba4152325 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_grad.py @@ -0,0 +1,1077 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gradients for operators defined in linalg_ops.py. + +Useful reference for derivative formulas is (Mike Giles, 2008). + +Ionescu et al. (2015) provide a detailed derivation of formulas for +backpropagating through spectral layers (SVD and Eig). + +References: + An extended collection of matrix derivative results for + forward and reverse mode automatic differentiation: + [Mike Giles, 2008] + (https://ora.ox.ac.uk/objects/uuid:8d0c0a29-c92b-4153-a1d2-38b276e93124) + ([pdf](http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf)) + Matrix Backpropagation for Deep Networks with Structured Layers + [Ionescu et al., 2015] + (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/Ionescu_Matrix_Backpropagation_for_ICCV_2015_paper.html) + ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Ionescu_Matrix_Backpropagation_for_ICCV_2015_paper.pdf)) + Training Deep Networks with Structured Layers by Matrix Backpropagation: + [Ionescu et al., 2015](https://arxiv.org/abs/1509.07838) + ([pdf](https://arxiv.org/pdf/1509.07838.pdf)) +""" +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import gen_linalg_ops +from 
tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as _linalg + + +@ops.RegisterGradient("MatrixInverse") +def _MatrixInverseGrad(op: ops.Operation, grad): + """Gradient for MatrixInverse.""" + ainv = op.outputs[0] + op_adjoint = op.get_attr("adjoint") + return -math_ops.matmul( # pylint: disable=invalid-unary-operand-type + ainv, + math_ops.matmul(grad, ainv, adjoint_a=op_adjoint, + adjoint_b=not op_adjoint), + adjoint_a=not op_adjoint) + + +@ops.RegisterGradient("Einsum") +def _EinsumGrad(op: ops.Operation, grad): + """Gradient for Einsum.""" + ellipsis = "..." + + def _GetAxisFromLabel(subscripts, label): + """Returns the axis (possibly negative) corresponding to a label. + + Returns the axis index of the axis label if it is before an ellipsis (or if + the ellipsis is not present), and the negative index if it occurs after the + ellipsis. E.g. index of `b` in `ab...cd`, is `1`, but that of `c` is `-2`. + + For multiple occurrences, returns the leftmost one. If not found, returns + None. + + Args: + subscripts: A string denoting the einsum subscript (e.g. `ab...cd`) + label: The single character axis label. + """ + splits = subscripts.split(ellipsis) + index = splits[0].find(label) + if index != -1: + return index + if len(splits) < 2: + return None + index = splits[1].find(label) + if index != -1: + return index - len(splits[1]) + return None + + def _GetBcastSubshape(subscripts): + """Returns a tuple denoting the slice mapping to ellipsis. + + For a given subscript, returns a tuple (start, end) denoting the start + axis index and the (negative) end axis index respectively. For any input + Tensor `x` described by the subscript, `x[start:end]` would be the slice + represented by the ellipsis. E.g. For `ab...cd` returns `[1, -2]`. + + If ellipsis is not present in `subscripts`, returns `(0, 0)`. + + Args: + subscripts: A string denoting the einsum subscript. 
+ """ + start = subscripts.find(ellipsis) + if start == -1: + return 0, 0 + remaining = len(subscripts) - (start + len(ellipsis)) + end = -remaining if remaining > 0 else None + return start, end + + def _GetReducedSubscripts(reduced_label_set, input_shape, subscripts): + """Returns reduced subscripts and their corresponding dimensions and axes. + + Given a set of axis labels, returns their concatenated subscript, their + corresponding dimensions from input_shape, and their corresponding axes. + Note that the concatenated subscript `reduced_subs` may have axis labels + from `reduced_label_set` in any order. For example, for the reduced label + set `{b, d}`, subscripts `aabbcd` and input shape `[2,2,5,5,3,4]`, returns + subscripts `bd`, dimensions `[5,4]` and axes `[2,5]`. + + Args: + reduced_label_set: Set of axis labels which appear in `subscripts`. + input_shape: A `Tensor` representing the shape of the einsum operand + corresponding to `subscripts`. + subscripts: A string denoting the einsum subscript. + + Returns: + reduced_subs: Subscripts formed by a concatenation of labels in + `reduced_label_set`. + reduced_dims: Dimensions from `input_shape` corresponding to each label + in `reduced_subs`. + reduced_axes: Axes described by `subscripts` corresponding to each label + in `reduced_subs`. If there are multiple occurrences in `subscripts`, + we consider only the leftmost one. + + """ + # Concatenate the sequence of reduced axis labels. + reduced_subs = "".join(list(reduced_label_set)) + # Get the axis (may be positive, negative or zero) for each of the reduced + # labels. If the same label appears multiple times, get the left-most axis. + reduced_axes = [_GetAxisFromLabel(subscripts, s) for s in reduced_subs] + # Get the corresponding dimensions for each reduced axis. 
+ reduced_dims = array_ops_stack.stack( + [input_shape[ax] for ax in reduced_axes]) + return reduced_subs, reduced_dims, reduced_axes + + def _GetGradReduced(output_grad, output_subs, input_subs, input_shape, + reduced_label_set): + """Returns the gradient wrt input for a unary einsum with reductions. + + Args: + output_grad: The gradient wrt the output of a unary einsum operation. + output_subs: The output subscript. (E.g. `ac` for equation `abc->ac`). + input_subs: The input subscript. (E.g. `abc` for equation `abc->ac`). + input_shape: A `Tensor` representing the shape of the input operand. + reduced_label_set: The set of axis labels appearing in `input_subs` but + not in `output_subs`. + """ + # Let's say the einsum operation was "aabbcd->ca", where axis labels 'b' and + # 'd' are reduced with input_shape [2,2,5,5,3,4]. Then obtain the reduced + # subscripts "bd", corresponding dimensions [5,4] and axes [2,5]. + reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts( + reduced_label_set, input_shape, input_subs) + # Whether either the input or the output subscripts have a repeated label. + # This is true for "aabbcd->ca" or "abd->cca" but false for "abcd->ca". + has_repeated_labels = ( + len(set(input_subs)) + len(set(output_subs)) < + len(input_subs) + len(output_subs)) + # Compute the input subscripts without the reduced axis labels, e.g. "aac" + # for the equation "aabbcd->ca". + input_subs_without_reduced_labels = "".join( + [s for s in input_subs if s not in reduced_label_set]) + + # The gradient wrt the input for the equation "abc->ac" (or, equivalently + # reduce_sum(..., axis=1)) is just the gradient of the output tiled N times + # along axis 1, where label 'b' represents a dimension of size N. + # + # If we're not dealing with repeated labels, and the non-reduced labels + # doesn't need to be transposed, then just tiling is enough and there is no + # need to call another einsum. For example, tiling is sufficient for + # "abcd->ac". 
But for equations like "aabbcd->ac" (generalized traces) or + # "abc->ca" (transpose), we'd need another einsum operation after tiling. + if (not has_repeated_labels and + input_subs_without_reduced_labels == output_subs): + # Obtain the shape of the output, as if keepdims=True on reduce sum. E.g. + # for the equation "abcd->ac" with input shape [2,5,3,4], we get the + # reduced shape [2,1,3,1]. + reduced_shape = math_ops.reduced_shape( + input_shape, ops.convert_to_tensor(reduced_axes)) + # Reshaping the gradient (wrt "ac") to [2,1,3,1] and broadcasting it to + # the shape [2,5,3,4] results in the gradient wrt "abcd". + return array_ops.broadcast_to( + array_ops.reshape(output_grad, reduced_shape), input_shape) + + # If we *do* have traces or transpose operations, then prepend the extra + # reduced dimensions to the front. E.g. Given the equation "aabbcd->ca" we'd + # first obtain the VJP for "bdca->ca", and then the VJP for "aabbcd->bdca". + # + # Obtain the input shape with reduced dimensions prepended, viz. [5,4,3,2]. + # This is the shape of the intermediate "bdca". + grad_shape_with_reduced_labels = array_ops.concat( + [reduced_dims, array_ops.shape(output_grad)], axis=0) + # Obtain the output shape of the reduction-only equation "bdca->ca" as if + # keepdims=True; viz. [1,1,3,2]. Since we prepended the reduced labels, we + # just have to prepend that many 1s to the output shape. + reduced_shape = ( + array_ops.concat([ + array_ops.ones(len(reduced_label_set), dtype=dtypes.int32), + array_ops.shape(output_grad) + ], + axis=0)) + # Compute the VJP for the intermediate (viz. "bdca->ca") for which + # broadcasting is sufficient. + broadcasted_grad = array_ops.broadcast_to( + array_ops.reshape(output_grad, reduced_shape), + grad_shape_with_reduced_labels) + # Compute the VJP for the final step (viz. "aabbcd->bdca"). We can use + # einsum with the input and output subscripts reversed (viz. 
"bdca->aabbcd") + # since the output axis labels now appear in the input subscripts. + return gen_linalg_ops.einsum([broadcasted_grad], + "{}->{}".format(reduced_subs + output_subs, + input_subs)) + + def _GetGradWrt(output_grad, other_operand, input_shape, input_subs, + other_subs, output_subs): + """Returns the gradient wrt an input operand for a binary einsum. + + This function does not handle (un)broadcasting. This must be done separately + on the returned gradient. + + Args: + output_grad: The gradient wrt the output of a binary einsum operation. + other_operand: The complementary `Tensor` operand i.e. which is not the + input operand. + input_shape: A `Tensor` representing the shape of input operand. + input_subs: The subscripts of the input operand. + other_subs: The subscripts of the complementary operand. + output_subs: The output subscripts. + """ + # Claim: For the einsum operation z = einsum("{eq_x},{eq_y}->{eq_z}", x, y), + # where the equation involves only Tensor contractions, generalized traces + # and transposes, the input gradients are given by the vector-jacobian + # products (VJPs): + # + # grad_wrt_x = einsum("{eq_y},{eq_z}->{eq_x}", y, grad_wrt_z) + # grad_wrt_y = einsum("{eq_x},{eq_z}->{eq_y}", x, grad_wrt_z} + # + # where grad_wrt_x and grad_wrt_y are the gradients with respect to inputs + # x and y and grad_wrt_z is the given gradient with respect to output z. + # + # Proof: For unary einsum equations involving only transpose ("ij->ji") and + # traces ("ii->i"), the linear mapping's Jacobian at input x is given + # by the function itself. We can verify that the linear map given by the + # VJP are einsums with the equations "ji->ij" and "i->ii" respectively, + # where the latter represents 'un-tracing', or filling the diagonal with + # the input axis and non-diagonal entries are zeros. 
+ # Furthermore, recall that matrix multiplication, which is + # represented by the equation "ab,bc->ac", has its VJPs given by the + # einsum equations "ac,bc->ab" and "ab,ac->bc" (see, for example + # https://math.stackexchange.com/a/2755680). Combined with transposes and + # traces we can rewrite Tensor contractions as regular matrix + # multiplication. Since each of these operations have their VJPs described + # by einsums of the required pattern, the result follows. + # + # Accordingly, einsum operations except for those with reductions, e.g. + # "abc,cd->ad" have their VJPs defined by: + # "{output_subs},{other_subs}->{input_subs}". + # + # But if there is a reduction, this would lead to the equation "ad,cd->abc" + # which is invalid because the reduced axis label 'b' is present in the + # output but not in any of the inputs. Therefore, we compute the VJP in two + # steps: first we obtain VJP for "ac,cd->ad" and then we compute the VJP of + # "abc->ac" or, equivalently, reduce_sum(..., axis=1). + # + # Compute the set of input axis labels which doesn't appear in either the + # output subscripts or the other operand's subscript. E.g. the set {'b'} for + # the equation "abc,cd->ad". + reduced_label_set = set(input_subs).difference( + set(output_subs + other_subs + ".")) + # Obtain the input subscripts with the reduced axis labels removed. E.g. + # "ac" in the above example. + left_subs = "".join(s for s in input_subs if s not in reduced_label_set) + + # Compute the gradient wrt the input, without accounting for the operation + # "abc->ac". So, now we have the VJP of the operation "ac,cd->ad". + grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand], + "{},{}->{}".format( + output_subs, other_subs, + left_subs)) + # If the reduced_label_set is empty, then we already have the gradient + # wrt the input. 
+ if not reduced_label_set: + return grad_reduced + # Otherwise, we currently have the gradient wrt the output of the reduction + # operation "abc->ac". Invoke the subroutine for the gradient for unary + # einsum with reductions. + return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, + reduced_label_set) + + equation = op.get_attr("equation") + if isinstance(equation, bytes): + equation = equation.decode() + input_subs, output_subs = equation.split("->") + + if len(op.inputs) == 1: + # For the unary einsum z = einsum("{eq_x}->{eq_z}", x), the gradient wrt the + # input (VJP) is given by the reversed equation: + # grad_wrt_x = einsum("{eq_z}->{eq_x}", grad_wrt_z) + # (See the justification in _GetGradWrt). This is valid unless there are + # reduced axis labels; i.e. axis labels appearing in the input but not in + # the output subscripts. + input_shape = array_ops.shape(op.inputs[0]) + # Find the axis labels which appear only in the input. + reduced_label_set = set(input_subs).difference(set(output_subs + ellipsis)) + if not reduced_label_set: + # Return the einsum given by the reversed equation, since we don't have + # reduced axes. + return gen_linalg_ops.einsum([grad], + "{}->{}".format(output_subs, input_subs)) + # We do have reduced axes, so we invoke the subroutine for reduced unary + # einsums. + return _GetGradReduced(grad, output_subs, input_subs, input_shape, + reduced_label_set) + + x_subs, y_subs = input_subs.split(",") + # Add ellipsis for broadcasted dimensions if any operand does not have it. + # This is because the equation "...ij,jk->ik" may be valid if the 0th input's + # batch shape is empty, but the VJP equation "jk,ik->...ij" is not valid + # because only the output subscripts contain ellipsis. + if ellipsis in output_subs: + if ellipsis not in x_subs: + x_subs += ellipsis + if ellipsis not in y_subs: + y_subs += ellipsis + + # Obtain the gradients wrt the inputs x and y, without taking into account + # the unbroadcasting. 
+ x, y = op.inputs[0], op.inputs[1] + if grad.dtype.is_complex: + x = math_ops.conj(x) + y = math_ops.conj(y) + + x_shape = array_ops.shape(x) + y_shape = array_ops.shape(y) + grad_x = _GetGradWrt(grad, y, x_shape, x_subs, y_subs, output_subs) + grad_y = _GetGradWrt(grad, x, y_shape, y_subs, x_subs, output_subs) + + if ellipsis not in output_subs: + # If no ellipsis in the output; then no need to unbroadcast. + return grad_x, grad_y + + # Below we handle the case that broadcasting between x and y was necessary, + # with x and y having possibly different batch shapes. + + # Obtain the range of axes which map to ellipsis. E.g. for subscripts 'ab...c' + # and shape of rank 10; the range [3:-1] denotes the broadcasted axes. + bx_start, bx_end = _GetBcastSubshape(x_subs) + by_start, by_end = _GetBcastSubshape(y_subs) + # If the static batch shapes are equal, we don't need to unbroadcast. + x_shape_static = x.get_shape() + y_shape_static = y.get_shape() + if (x_shape_static.is_fully_defined() and + y_shape_static.is_fully_defined() and + x_shape_static[bx_start:bx_end] == y_shape_static[by_start:by_end]): + return grad_x, grad_y + + # Sum the gradient across the broadcasted axes. 
+ rx, ry = array_ops.broadcast_gradient_args(x_shape[bx_start:bx_end], + y_shape[by_start:by_end]) + grad_x = array_ops.reshape( + math_ops.reduce_sum(grad_x, bx_start + rx), x_shape) + grad_y = array_ops.reshape( + math_ops.reduce_sum(grad_y, by_start + ry), y_shape) + return grad_x, grad_y + + +@ops.RegisterGradient("MatrixDeterminant") +def _MatrixDeterminantGrad(op: ops.Operation, grad): + """Gradient for MatrixDeterminant.""" + a = op.inputs[0] + c = op.outputs[0] + a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True) + multipliers = array_ops.reshape(grad * c, + array_ops.concat([array_ops.shape(c), [1, 1]], + 0)) + return multipliers * a_adj_inv + + +@ops.RegisterGradient("MatrixSquareRoot") +def _MatrixSquareRootGrad(op: ops.Operation, grad): + """Gradient for MatrixSquareRoot.""" + + # Let A be an m x m square matrix (or batch of matrices) + # Let R = sqrtm(A) + # By definition, A = RR + # Take the differential: dA = d(RR) = RdR + dRR + # Solve the resulting Sylvester equation for dR + + # Used to find Kronecker products within the Sylvester equation + def _KroneckerProduct(b1, b2): + """Computes the Kronecker product of two batches of square matrices.""" + b1_shape = array_ops.shape(b1) + b2_shape = array_ops.shape(b2) + b1_order = b1_shape[-1] + b2_order = b2_shape[-1] + + shape_slice_size = [math_ops.subtract(array_ops.size(b1_shape), 2)] + shape_slice = array_ops.slice(b1_shape, [0], + shape_slice_size) # Same for both batches + b1_reshape_shape = array_ops.concat( + [shape_slice, [b1_order], [1], [b1_order], [1]], 0) + b2_reshape_shape = array_ops.concat( + [shape_slice, [1], [b2_order], [1], [b2_order]], 0) + + b1_reshape = array_ops.reshape(b1, b1_reshape_shape) + b2_reshape = array_ops.reshape(b2, b2_reshape_shape) + + order_prod = b1_order * b2_order + kprod_shape = array_ops.concat([shape_slice, [order_prod], [order_prod]], 0) + return array_ops.reshape(b1_reshape * b2_reshape, kprod_shape) + + sqrtm = op.outputs[0] # R + shape = 
array_ops.shape(sqrtm) + order = shape[-1] # m + matrix_count = math_ops.reduce_prod(shape[0:-2]) + + # Get batch of m x m identity matrices + eye = linalg_ops.eye(order, dtype=sqrtm.dtype) # m x m identity matrix + eye_flat = array_ops.reshape(eye, [-1]) + eye_tiled = array_ops.tile(eye_flat, [matrix_count]) + eye_batch = array_ops.reshape(eye_tiled, shape) + + # The transpose of R is taken in the k1 term instead of k2 in + # order to prevent redundant transposition of R (i.e. (R')' = R) + sqrtm_transpose = array_ops.matrix_transpose(sqrtm) + k1 = _KroneckerProduct(eye_batch, sqrtm_transpose) + k2 = _KroneckerProduct(sqrtm, eye_batch) + ksum = math_ops.add(k1, k2) + + # Vectorize dA + shape_slice_size = [math_ops.subtract(array_ops.size(shape), 2)] + shape_slice = array_ops.slice(shape, [0], shape_slice_size) + shape_vec_da = array_ops.concat([shape_slice, [order * order], [1]], 0) + vec_da = array_ops.reshape(array_ops.matrix_transpose(grad), shape_vec_da) + + # Solve for vec(dR) + vec_dsqrtm = linalg_ops.matrix_solve(ksum, vec_da) + + # Solve for dR by inverse vectorizing vec(dR) + dsqrtm_transpose = array_ops.reshape(vec_dsqrtm, shape) + return array_ops.matrix_transpose(dsqrtm_transpose) + + +@ops.RegisterGradient("LogMatrixDeterminant") +def _LogMatrixDeterminantGrad(op: ops.Operation, _, grad_b): + """Gradient for LogMatrixDeterminant.""" + a = op.inputs[0] + c = op.outputs[1] + a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True) + multipliers = array_ops.reshape( + grad_b, array_ops.concat([array_ops.shape(c), [1, 1]], 0)) + return multipliers * a_adj_inv + + +@ops.RegisterGradient("Cholesky") +def _CholeskyGrad(op: ops.Operation, grad): + """Gradient for Cholesky.""" + + # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1} + l = op.outputs[0] + num_rows = array_ops.shape(l)[-1] + batch_shape = array_ops.shape(l)[:-2] + l_inverse = linalg_ops.matrix_triangular_solve(l, + linalg_ops.eye( + num_rows, + batch_shape=batch_shape, + 
dtype=l.dtype)) + + middle = math_ops.matmul(l, grad, adjoint_a=True) + middle = array_ops.matrix_set_diag(middle, + 0.5 * array_ops.matrix_diag_part(middle)) + middle = array_ops.matrix_band_part(middle, -1, 0) + + grad_a = math_ops.matmul( + math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse) + + grad_a += _linalg.adjoint(grad_a) + return grad_a * 0.5 + + +@ops.RegisterGradient("Qr") +def _QrGrad(op: ops.Operation, dq, dr): + """Gradient for Qr.""" + + # The methodology is explained in detail in https://arxiv.org/abs/2009.10071 + # QR and LQ Decomposition Matrix Backpropagation Algorithms for + # Square, Wide, and Deep, Real and Complex, Matrices and Their Software + # Implementation + q, r = op.outputs + if (r.shape.ndims is None or r.shape.as_list()[-2] is None or + r.shape.as_list()[-1] is None): + raise NotImplementedError("QrGrad not implemented with dynamic shapes. " + f"Received r.shape: {r.shape}") + if (r.shape.dims[-2].value > r.shape.dims[-1].value and + q.shape.dims[-2].value == q.shape.dims[-1].value): + raise NotImplementedError("QrGrad not implemented when nrows > ncols " + "and full_matrices is true. Received r.shape=" + f"{r.shape} with nrows={r.shape.dims[-2]}" + f"and ncols={r.shape.dims[-1]}.") + + def _TriangularSolve(x, r): + """Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri.""" + return _linalg.adjoint( + linalg_ops.matrix_triangular_solve( + r, _linalg.adjoint(x), lower=False, adjoint=False)) + + def _QrGradSquareAndDeepMatrices(q, r, dq, dr): + """Gradient for matrix orders num_rows >= num_cols + and full_matrices is false. 
+ """ + qdq = math_ops.matmul(q, dq, adjoint_a=True) + qdq_ = qdq - _linalg.adjoint(qdq) + rdr = math_ops.matmul(r, dr, adjoint_b=True) + rdr_ = rdr - _linalg.adjoint(rdr) + tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0) + + grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r)) + grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r) + ret = grad_a + grad_b + + if q.dtype.is_complex: + # need to add a correction to the gradient formula for complex case + m = rdr - _linalg.adjoint(qdq) + eyem = _linalg.set_diag(array_ops.zeros_like(m), _linalg.diag_part(m)) + correction = eyem - math_ops.cast(math_ops.real(eyem), q.dtype) + ret = ret + _TriangularSolve( + math_ops.matmul(q, _linalg.adjoint(correction)), r) + + return ret + + num_rows, num_cols = q.shape.dims[-2].value, r.shape.dims[-1] + + if num_rows >= num_cols: + return _QrGradSquareAndDeepMatrices(q, r, dq, dr) + + # Partition a = [x, y], r = [u, v] and reduce to the square case + a = op.inputs[0] + y = a[..., :, num_rows:] + u = r[..., :, :num_rows] + dv = dr[..., :, num_rows:] + du = dr[..., :, :num_rows] + dy = math_ops.matmul(q, dv) + dx = _QrGradSquareAndDeepMatrices(q, u, + dq + math_ops.matmul(y, dv, adjoint_b=True), + du) + return array_ops.concat([dx, dy], axis=-1) + + +@ops.RegisterGradient("MatrixSolve") +def _MatrixSolveGrad(op: ops.Operation, grad): + """Gradient for MatrixSolve.""" + a = op.inputs[0] + adjoint_a = op.get_attr("adjoint") + c = op.outputs[0] + grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a) + if adjoint_a: + grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + else: + grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + return (grad_a, grad_b) + + +@ops.RegisterGradient("MatrixSolveLs") +def _MatrixSolveLsGrad(op: ops.Operation, grad): + """Gradients for MatrixSolveLs.""" + + # TODO(rmlarsen): The implementation could be more efficient: + # a) Output the 
Cholesky factorization from forward op instead of + # recomputing it here. + # b) Implement a symmetric rank-k update op instead of computing + # x*z + transpose(x*z). This pattern occurs other places in TensorFlow. + # pylint: disable=g-doc-args + def _Overdetermined(op: ops.Operation, grad): + """Gradients for the overdetermined case of MatrixSolveLs. + + This is the backprop for the solution to the normal equations of the first + kind: + X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B + which solve the least squares problem + min ||A * X - B||_F^2 + lambda ||X||_F^2. + """ + a = op.inputs[0] + b = op.inputs[1] + x = op.outputs[0] + l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype) + # pylint: disable=protected-access + chol = linalg_ops._RegularizedGramianCholesky( + a, l2_regularizer=l2_regularizer, first_kind=True) + # pylint: enable=protected-access + # Temporary z = (A^T * A + lambda * I)^{-1} * grad. + z = linalg_ops.cholesky_solve(chol, grad) + xzt = math_ops.matmul(x, z, adjoint_b=True) + zx_sym = xzt + array_ops.matrix_transpose(xzt) + grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + grad_b = math_ops.matmul(a, z) + return (grad_a, grad_b, None) + + # pylint: disable=g-doc-args + def _Underdetermined(op: ops.Operation, grad): + """Gradients for the underdetermined case of MatrixSolveLs. + + This is the backprop for the solution to the normal equations of the second + kind: + X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B + that (for lambda=0) solve the least squares problem + min ||X||_F subject to A*X = B. 
+ """ + a = op.inputs[0] + b = op.inputs[1] + l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype) + # pylint: disable=protected-access + chol = linalg_ops._RegularizedGramianCholesky( + a, l2_regularizer=l2_regularizer, first_kind=False) + # pylint: enable=protected-access + grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad)) + # Temporary tmp = (A * A^T + lambda * I)^{-1} * B. + tmp = linalg_ops.cholesky_solve(chol, b) + a1 = math_ops.matmul(tmp, a, adjoint_a=True) + a1 = -math_ops.matmul(grad_b, a1) # pylint: disable=invalid-unary-operand-type + a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True) + a2 = math_ops.matmul(tmp, a2, adjoint_b=True) + grad_a = a1 + a2 + return (grad_a, grad_b, None) + + fast = op.get_attr("fast") + if fast is False: + raise ValueError("Gradient not defined for fast=False") + matrix_shape = op.inputs[0].get_shape()[-2:] + if matrix_shape.is_fully_defined(): + if matrix_shape[-2] >= matrix_shape[-1]: + return _Overdetermined(op, grad) + else: + return _Underdetermined(op, grad) + else: + # We have to defer determining the shape to runtime and use + # conditional execution of the appropriate graph. 
+ matrix_shape = array_ops.shape(op.inputs[0])[-2:] + return cond.cond(matrix_shape[-2] >= matrix_shape[-1], + lambda: _Overdetermined(op, grad), + lambda: _Underdetermined(op, grad)) + + +@ops.RegisterGradient("BandedTriangularSolve") +def _BandedTriangularSolveGrad(op: ops.Operation, grad): + """Gradient for BandedTriangularSolve.""" + a = op.inputs[0] + b = op.inputs[1] + num_bands = array_ops.shape(a)[-2] + adjoint_a = op.get_attr("adjoint") + lower_a = op.get_attr("lower") + c = op.outputs[0] + grad_b = linalg_ops.banded_triangular_solve( + a, grad, lower=lower_a, adjoint=not adjoint_a) + if adjoint_a: + grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + else: + grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + if lower_a: + grad_a = array_ops.matrix_diag_part( + grad_a, k=(-(num_bands - 1), 0), align="LEFT_RIGHT") + else: + grad_a = array_ops.matrix_diag_part( + grad_a, k=(0, num_bands - 1), align="LEFT_RIGHT") + # If the static batch shapes are equal, we don't need to unbroadcast. 
+ if (a.shape.is_fully_defined() and b.shape.is_fully_defined() and + a.shape[:-2] == b.shape[:-2]): + return grad_a, grad_b + a_shape = array_ops.shape(a) + b_shape = array_ops.shape(b) + ra, rb = array_ops.broadcast_gradient_args(a_shape[:-2], b_shape[:-2]) + grad_a = array_ops.reshape(math_ops.reduce_sum(grad_a, axis=ra), a_shape) + grad_b = array_ops.reshape(math_ops.reduce_sum(grad_b, axis=rb), b_shape) + return grad_a, grad_b + + +@ops.RegisterGradient("MatrixTriangularSolve") +def _MatrixTriangularSolveGrad(op: ops.Operation, grad): + """Gradient for MatrixTriangularSolve.""" + a = op.inputs[0] + b = op.inputs[1] + adjoint_a = op.get_attr("adjoint") + lower_a = op.get_attr("lower") + c = op.outputs[0] + grad_b = linalg_ops.matrix_triangular_solve( + a, grad, lower=lower_a, adjoint=not adjoint_a) + if adjoint_a: + grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + else: + grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True) # pylint: disable=invalid-unary-operand-type + if lower_a: + grad_a = array_ops.matrix_band_part(grad_a, -1, 0) + else: + grad_a = array_ops.matrix_band_part(grad_a, 0, -1) + # If the static batch shapes are equal, we don't need to unbroadcast. + if (a.shape.is_fully_defined() and b.shape.is_fully_defined() and + a.shape[:-2] == b.shape[:-2]): + return grad_a, grad_b + a_shape = array_ops.shape(a) + b_shape = array_ops.shape(b) + ra, rb = array_ops.broadcast_gradient_args(a_shape[:-2], b_shape[:-2]) + grad_a = array_ops.reshape(math_ops.reduce_sum(grad_a, axis=ra), a_shape) + grad_b = array_ops.reshape(math_ops.reduce_sum(grad_b, axis=rb), b_shape) + return grad_a, grad_b + + +# To avoid nan in cases with degenerate eigenvalues or +# degenerate/zero singular values in calculations of +# f and s_inv_mat, we introduce a Lorentz broadening. 
+def _SafeReciprocal(x, epsilon=1e-20): + return x * math_ops.reciprocal(x * x + epsilon) + + +# pylint: disable=g-doc-args +@ops.RegisterGradient("Eig") +def _EigGrad(op: ops.Operation, grad_e, grad_v): + """Gradient for Eig. + + Based on eq. 4.77 from paper by + Christoph Boeddeker et al. + https://arxiv.org/abs/1701.00392 + See also + "Computation of eigenvalue and eigenvector derivatives + for a general complex-valued eigensystem" by Nico van der Aa. + As for now only distinct eigenvalue case is considered. + """ + e = op.outputs[0] + compute_v = op.get_attr("compute_v") + # a = op.inputs[0], which satisfies + # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i] + with ops.control_dependencies([grad_e, grad_v]): + if compute_v: + v = op.outputs[1] + vt = _linalg.adjoint(v) + # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0). + # Notice that because of the term involving f, the gradient becomes + # infinite (or NaN in practice) when eigenvalues are not unique. + # Mathematically this should not be surprising, since for (k-fold) + # degenerate eigenvalues, the corresponding eigenvectors are only defined + # up to arbitrary rotation in a (k-dimensional) subspace. + f = array_ops.matrix_set_diag( + _SafeReciprocal( + array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), + array_ops.zeros_like(e)) + f = math_ops.conj(f) + vgv = math_ops.matmul(vt, grad_v) + mid = array_ops.matrix_diag(grad_e) + diag_grad_part = array_ops.matrix_diag( + array_ops.matrix_diag_part( + math_ops.cast(math_ops.real(vgv), vgv.dtype))) + mid += f * (vgv - math_ops.matmul(math_ops.matmul(vt, v), diag_grad_part)) + # vt is formally invertible as long as the original matrix is + # diagonalizable. 
However, in practice, vt may + # be ill-conditioned when matrix original matrix is close to + # non-diagonalizable one + grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(mid, vt)) + else: + _, v = linalg_ops.eig(op.inputs[0]) + vt = _linalg.adjoint(v) + # vt is formally invertible as long as the original matrix is + # diagonalizable. However, in practice, vt may + # be ill-conditioned when matrix original matrix is close to + # non-diagonalizable one + grad_a = linalg_ops.matrix_solve( + vt, math_ops.matmul(array_ops.matrix_diag(grad_e), vt)) + return math_ops.cast(grad_a, op.inputs[0].dtype) + + +@ops.RegisterGradient("SelfAdjointEigV2") +def _SelfAdjointEigV2Grad(op: ops.Operation, grad_e, grad_v): + """Gradient for SelfAdjointEigV2.""" + e = op.outputs[0] + compute_v = op.get_attr("compute_v") + # a = op.inputs[0], which satisfies + # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i] + with ops.control_dependencies([grad_e, grad_v]): + if compute_v: + v = op.outputs[1] + # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0). + # Notice that because of the term involving f, the gradient becomes + # infinite (or NaN in practice) when eigenvalues are not unique. + # Mathematically this should not be surprising, since for (k-fold) + # degenerate eigenvalues, the corresponding eigenvectors are only defined + # up to arbitrary rotation in a (k-dimensional) subspace. 
+ f = array_ops.matrix_set_diag( + _SafeReciprocal( + array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), + array_ops.zeros_like(e)) + grad_a = math_ops.matmul( + v, + math_ops.matmul( + array_ops.matrix_diag(grad_e) + + f * math_ops.matmul(v, grad_v, adjoint_a=True), + v, + adjoint_b=True)) + else: + _, v = linalg_ops.self_adjoint_eig(op.inputs[0]) + grad_a = math_ops.matmul(v, + math_ops.matmul( + array_ops.matrix_diag(grad_e), + v, + adjoint_b=True)) + # The forward op only depends on the lower triangular part of a, so here we + # symmetrize and take the lower triangle + grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0) + grad_a = array_ops.matrix_set_diag(grad_a, + 0.5 * array_ops.matrix_diag_part(grad_a)) + return grad_a + + +@ops.RegisterGradient("Svd") +def _SvdGrad(op: ops.Operation, grad_s, grad_u, grad_v): + """Gradient for the singular value decomposition.""" + + # The derivation for the compute_uv=False case, and most of + # the derivation for the full_matrices=True case, are in + # Giles' paper (see reference at top of file). 
A derivation for + # the full_matrices=False case is available at + # https://j-towns.github.io/papers/svd-derivative.pdf + # The derivation for complex valued SVD can be found in + # https://re-ra.xyz/misc/complexsvd.pdf or + # https://giggleliu.github.io/2019/04/02/einsumbp.html + a = op.inputs[0] + a_shape = a.get_shape().with_rank_at_least(2) + grad_s = math_ops.cast(grad_s, a.dtype) + grad_s_mat = array_ops.matrix_diag(grad_s) + + if not op.get_attr("compute_uv"): + s, u, v = linalg_ops.svd(a, compute_uv=True) + grad_a = math_ops.matmul(u, math_ops.matmul(grad_s_mat, v, adjoint_b=True)) + grad_a.set_shape(a_shape) + return grad_a + + full_matrices = op.get_attr("full_matrices") + + grad_u_shape = grad_u.get_shape().with_rank_at_least(2) + grad_v_shape = grad_v.get_shape().with_rank_at_least(2) + m = a_shape.dims[-2].merge_with(grad_u_shape[-2]) + n = a_shape.dims[-1].merge_with(grad_v_shape[-2]) + batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with( + grad_v_shape[:-2]) + a_shape = batch_shape.concatenate([m, n]) + + m = a_shape.dims[-2].value + n = a_shape.dims[-1].value + # TODO(rmlarsen): Make this work with placeholders. + if m is None or n is None: + raise NotImplementedError( + "SVD gradient has not been implemented for input with unknown " + "inner matrix shape.") + + s = op.outputs[0] + u = op.outputs[1] + v = op.outputs[2] + s = math_ops.cast(s, a.dtype) + + use_adjoint = False + if m > n: + # Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the + # Hermitian transpose of the gradient at the end. + use_adjoint = True + m, n = n, m + u, v = v, u + grad_u, grad_v = grad_v, grad_u + + with ops.control_dependencies([grad_s, grad_u, grad_v]): + if full_matrices and abs(m - n) > 1: + raise NotImplementedError( + "svd gradient is not implemented for abs(m - n) > 1 " + f"when full_matrices is True. 
Received: m={m} and n={n} from " + f"op input={a} with shape={a_shape}.") + s_mat = array_ops.matrix_diag(s) + s2 = math_ops.square(s) + + # NOTICE: Because of the term involving f, the gradient becomes + # infinite (or NaN in practice) when singular values are not unique. + # Mathematically this should not be surprising, since for (k-fold) + # degenerate singular values, the corresponding singular vectors are + # only defined up a (k-dimensional) subspace. In practice, this can + # lead to numerical instability when singular values are close but not + # exactly equal. + + s_shape = array_ops.shape(s) + f = array_ops.matrix_set_diag( + _SafeReciprocal( + array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1)), + array_ops.zeros_like(s)) + s_inv_mat = array_ops.matrix_diag(_SafeReciprocal(s)) + + v1 = v[..., :, :m] + grad_v1 = grad_v[..., :, :m] + + u_gu = math_ops.matmul(u, grad_u, adjoint_a=True) + v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True) + + f_u = f * u_gu + f_v = f * v_gv + + term1_nouv = ( + grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) + + math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v))) + + term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True)) + + if m == n: + grad_a_before_transpose = term1 + else: + gv1t = array_ops.matrix_transpose(grad_v1, conjugate=True) + gv1t_v1 = math_ops.matmul(gv1t, v1) + term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True) + + if full_matrices: + v2 = v[..., :, m:n] + grad_v2 = grad_v[..., :, m:n] + + v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True) + term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True) + + u_s_inv = math_ops.matmul(u, s_inv_mat) + term2 = math_ops.matmul(u_s_inv, term2_nous) + + grad_a_before_transpose = term1 + term2 + + if a.dtype.is_complex: + eye = _linalg.eye(s_shape[-1], batch_shape=s_shape[:-1], dtype=a.dtype) + l = eye * v_gv + term3_nouv = math_ops.matmul(s_inv_mat, _linalg.adjoint(l) - l) + term3 = 1 / 2. 
* math_ops.matmul( + u, math_ops.matmul(term3_nouv, v1, adjoint_b=True)) + + grad_a_before_transpose += term3 + + if use_adjoint: + grad_a = array_ops.matrix_transpose( + grad_a_before_transpose, conjugate=True) + else: + grad_a = grad_a_before_transpose + + grad_a.set_shape(a_shape) + return grad_a + + +def _LeftShift(x): + """Shifts next-to-last dimension to the left, adding zero on the right.""" + rank = array_ops.rank(x) + zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32) + pad = array_ops.concat([zeros, array_ops.constant([[0, 1], [0, 0]])], axis=0) + return array_ops.pad(x[..., 1:, :], pad) + + +def _RightShift(x): + """Shifts next-to-last dimension to the right, adding zero on the left.""" + rank = array_ops.rank(x) + zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32) + pad = array_ops.concat([zeros, array_ops.constant([[1, 0], [0, 0]])], axis=0) + return array_ops.pad(x[..., :-1, :], pad) + + +@ops.RegisterGradient("TridiagonalMatMul") +def _TridiagonalMatMulGrad(op: ops.Operation, grad): + """Gradient for TridiagonalMatMul.""" + superdiag_conj = array_ops.matrix_transpose(op.inputs[0], conjugate=True) + maindiag_conj = array_ops.matrix_transpose(op.inputs[1], conjugate=True) + subdiag_conj = array_ops.matrix_transpose(op.inputs[2], conjugate=True) + rhs_conj = math_ops.conj(op.inputs[3]) + + superdiag_grad = math_ops.reduce_sum(_LeftShift(rhs_conj) * grad, axis=-1) + maindiag_grad = math_ops.reduce_sum(rhs_conj * grad, axis=-1) + subdiag_grad = math_ops.reduce_sum(_RightShift(rhs_conj) * grad, axis=-1) + rhs_grad = _RightShift(superdiag_conj * grad) + \ + maindiag_conj * grad + _LeftShift(subdiag_conj * grad) + + superdiag_grad = array_ops.expand_dims(superdiag_grad, -2) + maindiag_grad = array_ops.expand_dims(maindiag_grad, -2) + subdiag_grad = array_ops.expand_dims(subdiag_grad, -2) + + return superdiag_grad, maindiag_grad, subdiag_grad, rhs_grad + + +@ops.RegisterGradient("TridiagonalSolve") +def _TridiagonalSolveGrad(op: ops.Operation, 
grad): + """Gradient for TridiagonalSolveGrad.""" + diags = op.inputs[0] + x = op.outputs[0] + partial_pivoting = op.get_attr("partial_pivoting") + perturb_singular = op.get_attr("perturb_singular") + + # Transposing the matrix within tridiagonal_solve kernel by interchanging + # superdiagonal and subdiagonal wouldn't work on GPU due to mismatch with + # paddings required by cusparse*gtsv routines. + # So constructing the transposed matrix in Python. + diags_transposed = _TransposeTridiagonalMatrix(diags) + + grad_rhs = linalg_ops.tridiagonal_solve( + diags_transposed, + grad, + partial_pivoting=partial_pivoting, + perturb_singular=perturb_singular) + grad_diags = -_MatmulExtractingThreeDiagonals(grad_rhs, x) # pylint: disable=invalid-unary-operand-type + return grad_diags, grad_rhs + + +def _TransposeTridiagonalMatrix(diags): + """Transposes a tridiagonal matrix. + + Args: + diags: the diagonals of the input matrix in the compact form (see + linalg_ops.tridiagonal_solve). + + Returns: + Diagonals of the transposed matrix in the compact form. + """ + + diag = diags[..., 1, :] + + if diags.shape.is_fully_defined(): + # For fully defined tensor we can concat with a tensor of zeros, which is + # faster than using array_ops.pad(). 
+ zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype) + superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1) + subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1) + else: + rank = array_ops.rank(diags) + zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32) + superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])), + axis=0) + superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad) + subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])), + axis=0) + subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad) + return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2) + + +def _MatmulExtractingThreeDiagonals(x, y_tr): + """Multiplies matrices and extracts three diagonals from the product. + + With sizes M x K and K x M, this function takes O(MK) time and O(M) space, + while using math_ops.matmul, and then extracting the diagonals would take + O(M^2 K) time and O(M^2) space. + + Args: + x: first matrix + y_tr: second matrix transposed + + Returns: + Diagonals of the product in compact format (see + linalg_ops.tridiagonal_solve) + + """ + diag = math_ops.reduce_sum(x * y_tr, axis=-1) + + if y_tr.shape.is_fully_defined(): + zeros = array_ops.zeros( + list(x.shape[:-2]) + [1, x.shape[-1]], dtype=x.dtype) + superdiag = math_ops.reduce_sum( + x * array_ops.concat((y_tr[..., 1:, :], zeros), axis=-2), axis=-1) + subdiag = math_ops.reduce_sum( + x * array_ops.concat((zeros, y_tr[..., :-1, :]), axis=-2), axis=-1) + else: + rank = array_ops.rank(y_tr) + zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32) + superdiag_pad = array_ops.concat( + (zeros, array_ops.constant([[0, 1], [0, 0]])), axis=0) + superdiag = math_ops.reduce_sum( + x * array_ops.pad(y_tr[..., 1:, :], superdiag_pad), axis=-1) + subdiag_pad = array_ops.concat( + (zeros, array_ops.constant([[1, 0], [0, 0]])), axis=0) + subdiag = math_ops.reduce_sum( + x * array_ops.pad(y_tr[..., :-1, :], subdiag_pad), axis=-1) + 
return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/lookup_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/lookup_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9731fff62898dccf887dc79ae4f7e3ea0ebf987a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/lookup_ops.py @@ -0,0 +1,2462 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Lookup operations.""" +# pylint: disable=g-bad-name +import collections +import functools +import uuid + +from tensorflow.python.checkpoint import saveable_compat +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_lookup_ops +# Ensure lookup gradients are registered +from tensorflow.python.ops import lookup_grad # pylint: disable=unused-import +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import string_ops +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_lookup_ops import * +from tensorflow.python.saved_model import registration +from tensorflow.python.trackable import asset +# pylint: enable=wildcard-import +from tensorflow.python.trackable import base as trackable_base +from tensorflow.python.trackable import resource +from tensorflow.python.training.saver import BaseSaverBuilder +from tensorflow.python.types import internal +from tensorflow.python.util import compat as compat_util +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["initialize_all_tables"]) +@deprecated(None, "Use `tf.tables_initializer` instead.") +def initialize_all_tables(name="init_all_tables"): + """Returns an Op that initializes all tables of the default graph. + + Args: + name: Optional name for the initialization op. 
+ + Returns: + An Op that initializes all tables. Note that if there are + not tables the returned Op is a NoOp. + """ + return tables_initializer(name) + + +@tf_export(v1=["initializers.tables_initializer", "tables_initializer"]) +def tables_initializer(name="init_all_tables"): + """Returns an Op that initializes all tables of the default graph. + + Args: + name: Optional name for the initialization op. + + Returns: + An Op that initializes all tables. Note that if there are + not tables the returned Op is a NoOp. + + @compatibility(TF2) + `tf.compat.v1.tables_initializer` is no longer needed with eager execution and + `tf.function`. In TF2, when creating an initializable table like a + `tf.lookup.StaticHashTable`, the table will automatically be initialized on + creation. + + #### Before & After Usage Example + + Before: + + >>> with tf.compat.v1.Session(): + ... init = tf.compat.v1.lookup.KeyValueTensorInitializer(['a', 'b'], [1, 2]) + ... table = tf.compat.v1.lookup.StaticHashTable(init, default_value=-1) + ... tf.compat.v1.tables_initializer().run() + ... result = table.lookup(tf.constant(['a', 'c'])).eval() + >>> result + array([ 1, -1], dtype=int32) + + After: + + >>> init = tf.lookup.KeyValueTensorInitializer(['a', 'b'], [1, 2]) + >>> table = tf.lookup.StaticHashTable(init, default_value=-1) + >>> table.lookup(tf.constant(['a', 'c'])).numpy() + array([ 1, -1], dtype=int32) + + @end_compatibility + """ + initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS) + if initializers: + return control_flow_ops.group(*initializers, name=name) + return control_flow_ops.no_op(name=name) + + +def check_table_dtypes(table, key_dtype, value_dtype): + """Check that the given key_dtype and value_dtype matches the table dtypes. + + Args: + table: The table to check types against to. + key_dtype: The key data type to check. + value_dtype: The value data type to check. + + Raises: + TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data + types. 
+ """ + if key_dtype.base_dtype != table.key_dtype: + raise TypeError(f"Invalid key dtype for table, expected {table.key_dtype} " + f"but got {key_dtype}.") + if value_dtype.base_dtype != table.value_dtype: + raise TypeError("Invalid value dtype for table, expected " + f"{table.value_dtype} but got {value_dtype}.") + + +class LookupInterface(resource.TrackableResource): + """Represent a lookup table that persists across different steps.""" + + def __init__(self, key_dtype, value_dtype): + """Construct a lookup table interface. + + Args: + key_dtype: The table key type. + value_dtype: The table value type. + """ + self._key_dtype = dtypes.as_dtype(key_dtype) + self._value_dtype = dtypes.as_dtype(value_dtype) + super(LookupInterface, self).__init__() + + def _create_resource(self): + raise NotImplementedError + + @property + def key_dtype(self): + """The table key dtype.""" + return self._key_dtype + + @property + def value_dtype(self): + """The table value dtype.""" + return self._value_dtype + + @property + def name(self): + """The name of the table.""" + return NotImplementedError + + def size(self, name=None): + """Compute the number of elements in this table.""" + raise NotImplementedError + + def lookup(self, keys, name=None): + """Looks up `keys` in a table, outputs the corresponding values.""" + raise NotImplementedError + + def __getitem__(self, keys): + """Looks up `keys` in a table, outputs the corresponding values.""" + return self.lookup(keys) + + +class InitializableLookupTableBase(LookupInterface): + """Initializable lookup table interface. + + An initializable lookup tables persist across different steps. + """ + + def __init__(self, default_value, initializer): + """Construct a table object from a table reference. + + If requires a table initializer object (subclass of `TableInitializerBase`). + It provides the table key and value types, as well as the op to initialize + the table. The caller is responsible to execute the initialization op. 
+ + Args: + default_value: The value to use if a key is missing in the table. + initializer: The table initializer to use. + """ + super(InitializableLookupTableBase, self).__init__(initializer.key_dtype, + initializer.value_dtype) + self._default_value = ops.convert_to_tensor( + default_value, dtype=self._value_dtype) + self._default_value.get_shape().merge_with(tensor_shape.TensorShape([])) + if isinstance(initializer, trackable_base.Trackable): + self._initializer = self._track_trackable(initializer, "_initializer") + with ops.init_scope(): + self._resource_handle = self._create_resource() + if (not context.executing_eagerly() and + ops.get_default_graph()._get_control_flow_context() is not None): # pylint: disable=protected-access + with ops.init_scope(): + self._init_op = self._initialize() + else: + self._init_op = self._initialize() + + def _initialize(self): + return self._initializer.initialize(self) + + @property + def default_value(self): + """The default value of the table.""" + return self._default_value + + def size(self, name=None): + """Compute the number of elements in this table. + + Args: + name: A name for the operation (optional). + + Returns: + A scalar tensor containing the number of elements in this table. + """ + with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]): + return gen_lookup_ops.lookup_table_size_v2(self.resource_handle) + + def lookup(self, keys, name=None): + """Looks up `keys` in a table, outputs the corresponding values. + + The `default_value` is used for keys not present in the table. + + Args: + keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`. + name: A name for the operation (optional). + + Returns: + A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged, + otherwise a dense `Tensor`. + + Raises: + TypeError: when `keys` or `default_value` doesn't match the table data + types. 
+ """ + key_tensor = keys + # TODO(b/296302236): Remove RaggedTensor check by adding ragged + # dispatching. + if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)): + key_tensor = keys.values + + if keys.dtype.base_dtype != self._key_dtype: + raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, " + f"received: {keys.dtype}") + + with ops.name_scope( + name, "%s_Lookup" % self.name, + (self.resource_handle, key_tensor, self._default_value)): + values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, + key_tensor, + self._default_value) + + values.set_shape(key_tensor.get_shape()) + if isinstance(keys, sparse_tensor.SparseTensor): + return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape) + # TODO(b/296302236): Remove RaggedTensor check by adding ragged + # dispatching. + elif isinstance(keys, internal.RaggedTensor): + return keys.with_values(values) + else: + return values + + +class InitializableLookupTableBaseV1(InitializableLookupTableBase): + + @property + def initializer(self): + return self._init_op + + +@registration.register_tf_serializable( + predicate=lambda obj: isinstance(obj, StaticHashTable)) +@tf_export("lookup.StaticHashTable", v1=[]) +class StaticHashTable(InitializableLookupTableBase): + """A generic hash table that is immutable once initialized. + + Example usage: + + >>> keys_tensor = tf.constant(['a', 'b', 'c']) + >>> vals_tensor = tf.constant([7, 8, 9]) + >>> input_tensor = tf.constant(['a', 'f']) + >>> table = tf.lookup.StaticHashTable( + ... tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), + ... 
default_value=-1) + >>> table.lookup(input_tensor).numpy() + array([ 7, -1], dtype=int32) + + Or for more pythonic code: + + >>> table[input_tensor].numpy() + array([ 7, -1], dtype=int32) + + The result of a lookup operation has the same shape as the argument: + + >>> input_tensor = tf.constant([['a', 'b'], ['c', 'd']]) + >>> table[input_tensor].numpy() + array([[ 7, 8], + [ 9, -1]], dtype=int32) + + + """ + + def __init__(self, + initializer, + default_value, + name=None, + experimental_is_anonymous=False): + """Creates a non-initialized `HashTable` object. + + Creates a table, the type of its keys and values are specified by the + initializer. + Before using the table you will have to initialize it. After initialization + the table will be immutable. + + Args: + initializer: The table initializer to use. See `HashTable` kernel for + supported key and value types. + default_value: The value to use if a key is missing in the table. + name: A name for the operation (optional). + experimental_is_anonymous: Whether to use anonymous mode for the + table (default is False). In anonymous mode, the table + resource can only be accessed via a resource handle. It can't + be looked up by a name. When all resource handles pointing to + that resource are gone, the resource will be deleted + automatically. + + Returns: + A `HashTable` object. + """ + self._initializer = initializer + self._default_value = default_value + self._is_anonymous = experimental_is_anonymous + if not self._is_anonymous: + self._shared_name = self._initializer._shared_name # pylint: disable=protected-access + if not self._shared_name: + # Force using a shared name so that StaticHashTable resources can be + # shared across different kernels. If no "shared_name" is set and + # "use_node_name_sharing" is False, then each kernel gets its own local + # resource. 
+ self._shared_name = "hash_table_%s" % (str(uuid.uuid4()),) + self._name = name or "hash_table" + self._table_name = None + super(StaticHashTable, self).__init__(default_value, initializer) + self._value_shape = self._default_value.get_shape() + + def _create_resource(self): + if self._is_anonymous: + table_ref = gen_lookup_ops.anonymous_hash_table( + key_dtype=self._initializer.key_dtype, + value_dtype=self._initializer.value_dtype, + name=self._name) + else: + table_ref = gen_lookup_ops.hash_table_v2( + shared_name=self._shared_name, + key_dtype=self._initializer.key_dtype, + value_dtype=self._initializer.value_dtype, + name=self._name) + if context.executing_eagerly(): + self._table_name = None + else: + self._table_name = table_ref.op.name.split("/")[-1] + return table_ref + + @property + def name(self): + return self._table_name + + def export(self, name=None): + """Returns tensors of all keys and values in the table. + + Args: + name: A name for the operation (optional). + + Returns: + A pair of tensors with the first tensor containing all keys and the + second tensors containing all values in the table. 
+ """ + with ops.name_scope(name, "%s_Export" % self.name, [self.resource_handle]): + exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2( + self.resource_handle, self._key_dtype, self._value_dtype) + + exported_values.set_shape(exported_keys.get_shape().concatenate( + self._value_shape)) + return exported_keys, exported_values + + def _serialize_to_proto(self, **unused_kwargs): + return None + + def _add_trackable_child(self, name, value): + setattr(self, name, value) + if isinstance(value, trackable_base.Trackable): + self._track_trackable(value, name) # pylint:disable=protected-access + + @classmethod + def _deserialize_from_proto(cls, **kwargs): + + class _RestoredStaticHashTable(resource.RestoredResource): # pylint: disable=protected-access + + @classmethod + def _resource_type(cls): + return "RestoredStaticHashTable" + + return _RestoredStaticHashTable._deserialize_from_proto(**kwargs) # pylint: disable=protected-access + + +@tf_export(v1=["lookup.StaticHashTable"]) +class StaticHashTableV1(StaticHashTable): + """A generic hash table that is immutable once initialized. + + When running in graph mode, you must evaluate the tensor returned by + `tf.tables_initializer()` before evaluating the tensor returned by + this class's `lookup()` method. Example usage in graph mode: + + ```python + keys_tensor = tf.constant([1, 2]) + vals_tensor = tf.constant([3, 4]) + input_tensor = tf.constant([1, 5]) + table = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) + out = table.lookup(input_tensor) + with tf.Session() as sess: + sess.run(tf.tables_initializer()) + print(sess.run(out)) + ``` + + Note that in graph mode if you set `experimental_is_anonymous` to + `True`, you should only call `Session.run` once, otherwise each + `Session.run` will create (and destroy) a new table unrelated to + each other, leading to errors such as "Table not initialized". 
+ You can do so like this: + + ```python + keys_tensor = tf.constant([1, 2]) + vals_tensor = tf.constant([3, 4]) + input_tensor = tf.constant([1, 5]) + table = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1, + experimental_is_anonymous=True) + with tf.control_dependencies([tf.tables_initializer()]): + out = table.lookup(input_tensor) + with tf.Session() as sess: + print(sess.run(out)) + ``` + + In eager mode, no special code is needed to initialize the table. + Example usage in eager mode: + + ```python + tf.enable_eager_execution() + keys_tensor = tf.constant([1, 2]) + vals_tensor = tf.constant([3, 4]) + input_tensor = tf.constant([1, 5]) + table = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) + print(table.lookup(input_tensor)) + ``` + """ + + @property + def initializer(self): + return self._init_op + + +# For backwards compatibility. This will be removed in TF 2.0. +class HashTable(StaticHashTableV1): + + @property + def init(self): + return self.initializer + + +class TableInitializerBase(trackable_base.Trackable): + """Base class for lookup table initializers.""" + + def __init__(self, key_dtype, value_dtype): + """Construct a table initializer object. + + Args: + key_dtype: Type of the table keys. + value_dtype: Type of the table values. 
+ """ + self._key_dtype = dtypes.as_dtype(key_dtype) + self._value_dtype = dtypes.as_dtype(value_dtype) + + @property + def key_dtype(self): + """The expected table key dtype.""" + return self._key_dtype + + @property + def value_dtype(self): + """The expected table value dtype.""" + return self._value_dtype + + def initialize(self, table): + """Returns the table initialization op.""" + raise NotImplementedError + + @property + def _shared_name(self): + """Returns a shared name to be used by the table.""" + shared_name = "" + if context.executing_eagerly(): + # Ensure a unique name when eager execution is enabled to avoid spurious + # sharing issues. + # TODO(rohanj): Use context.anonymous_name() instead. + shared_name += str(ops.uid()) + return shared_name + + +@tf_export("lookup.KeyValueTensorInitializer") +class KeyValueTensorInitializer(TableInitializerBase): + """Table initializers given `keys` and `values` tensors. + + >>> keys_tensor = tf.constant(['a', 'b', 'c']) + >>> vals_tensor = tf.constant([7, 8, 9]) + >>> input_tensor = tf.constant(['a', 'f']) + >>> init = tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor) + >>> table = tf.lookup.StaticHashTable( + ... init, + ... default_value=-1) + >>> table.lookup(input_tensor).numpy() + array([ 7, -1], dtype=int32) + + """ + + def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None): + """Constructs a table initializer object based on keys and values tensors. + + Args: + keys: The tensor for the keys. + values: The tensor for the values. + key_dtype: The `keys` data type. Used when `keys` is a python array. + value_dtype: The `values` data type. Used when `values` is a python array. + name: A name for the operation (optional). 
+ """ + if (not context.executing_eagerly() and + ops.get_default_graph()._get_control_flow_context() is not None): # pylint: disable=protected-access + with ops.init_scope(): + self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys") + self._values = ops.convert_to_tensor( + values, dtype=value_dtype, name="values") + else: + self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys") + self._values = ops.convert_to_tensor( + values, dtype=value_dtype, name="values") + self._name = name if name is not None else "key_value_init" + if context.executing_eagerly(): + # Ensure a unique name when eager execution is enabled to avoid spurious + # sharing issues. + # TODO(rohanj): Use context.anonymous_name() instead. + self._name += str(ops.uid()) + + super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, + self._values.dtype) + + def initialize(self, table): + """Initializes the given `table` with `keys` and `values` tensors. + + Args: + table: The table to initialize. + + Returns: + The operation that initializes the table. + + Raises: + TypeError: when the keys and values data types do not match the table + key and value data types. + """ + check_table_dtypes(table, self._keys.dtype, self._values.dtype) + with ops.name_scope( + self._name, values=(table.resource_handle, self._keys, self._values)): + init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle, + self._keys, self._values) + ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) + return init_op + + +@tf_export("lookup.TextFileIndex") +class TextFileIndex: + """The key and value content to get from each line. + + This class defines the key and value used for `tf.lookup.TextFileInitializer`. + + The key and value content to get from each line is specified either + by the following, or a value `>=0`. + * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero, + expects data type int64. 
+ * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data + type string. + + A value `>=0` means use the index (starting at zero) of the split line based + on `delimiter`. + """ + WHOLE_LINE = -2 + LINE_NUMBER = -1 + + +@tf_export("lookup.TextFileInitializer") +class TextFileInitializer(TableInitializerBase): + r"""Table initializers from a text file. + + This initializer assigns one entry in the table for each line in the file. + + The key and value type of the table to initialize is given by `key_dtype` and + `value_dtype`. + + The key and value content to get from each line is specified by + the `key_index` and `value_index`. + + * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero, + expects data type int64. + * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data + type string. + * A value `>=0` means use the index (starting at zero) of the split line based + on `delimiter`. + + For example if we have a file with the following content: + + >>> import tempfile + >>> f = tempfile.NamedTemporaryFile(delete=False) + >>> content='\n'.join(["emerson 10", "lake 20", "palmer 30",]) + >>> f.file.write(content.encode('utf-8')) + >>> f.file.close() + + The following snippet initializes a table with the first column as keys and + second column as values: + + * `emerson -> 10` + * `lake -> 20` + * `palmer -> 30` + + >>> init= tf.lookup.TextFileInitializer( + ... filename=f.name, + ... key_dtype=tf.string, key_index=0, + ... value_dtype=tf.int64, value_index=1, + ... delimiter=" ") + >>> table = tf.lookup.StaticHashTable(init, default_value=-1) + >>> table.lookup(tf.constant(['palmer','lake','tarkus'])).numpy() + + Similarly to initialize the whole line as keys and the line number as values. + + * `emerson 10 -> 0` + * `lake 20 -> 1` + * `palmer 30 -> 2` + + >>> init = tf.lookup.TextFileInitializer( + ... filename=f.name, + ... key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, + ... 
@tf_export("lookup.TextFileInitializer")
class TextFileInitializer(TableInitializerBase):
  r"""Table initializers from a text file.

  This initializer assigns one entry in the table for each line in the file.
  The key and value types of the table are given by `key_dtype` and
  `value_dtype`; the content extracted from each line is selected by
  `key_index` and `value_index`:

  * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
    expects data type int64.
  * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
    type string.
  * A value `>=0` means use the index (starting at zero) of the split line
    based on `delimiter`.

  For example if we have a file with the following content:

  >>> import tempfile
  >>> f = tempfile.NamedTemporaryFile(delete=False)
  >>> content='\n'.join(["emerson 10", "lake 20", "palmer 30",])
  >>> f.file.write(content.encode('utf-8'))
  >>> f.file.close()

  The following snippet initializes a table with the first column as keys and
  second column as values (`emerson -> 10`, `lake -> 20`, `palmer -> 30`):

  >>> init= tf.lookup.TextFileInitializer(
  ...    filename=f.name,
  ...    key_dtype=tf.string, key_index=0,
  ...    value_dtype=tf.int64, value_index=1,
  ...    delimiter=" ")
  >>> table = tf.lookup.StaticHashTable(init, default_value=-1)
  >>> table.lookup(tf.constant(['palmer','lake','tarkus'])).numpy()

  Similarly to initialize the whole line as keys and the line number as values
  (`emerson 10 -> 0`, `lake 20 -> 1`, `palmer 30 -> 2`):

  >>> init = tf.lookup.TextFileInitializer(
  ...   filename=f.name,
  ...   key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
  ...   value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
  >>> table = tf.lookup.StaticHashTable(init, -1)
  >>> table.lookup(tf.constant('palmer 30')).numpy()
  2
  """

  def __init__(self,
               filename,
               key_dtype,
               key_index,
               value_dtype,
               value_index,
               vocab_size=None,
               delimiter="\t",
               name=None,
               value_index_offset=0):
    """Constructs a table initializer object to populate from a text file.

    It generates one key-value pair per line. The type of table key and
    value are specified by `key_dtype` and `value_dtype`, respectively.
    Similarly the content of the key and value are specified by `key_index`
    and `value_index` (see the class docstring for the sentinel values).

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_dtype: The `key` data type.
      key_index: the index that represents information of a line to get the
        table 'key' values from.
      value_dtype: The `value` data type.
      value_index: the index that represents information of a line to get the
        table 'value' values from.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: A name for the operation (optional).
      value_index_offset: A number to add to all indices extracted from the
        file. This is useful for cases where a user would like to reserve one
        or more low index values for control characters. For instance, setting
        value_index_offset to 1 maps the first vocabulary element to 1 instead
        of 0, leaving 0 free for e.g. a masking value.

    Raises:
      ValueError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    if not isinstance(filename, tensor_lib.Tensor) and not filename:
      raise ValueError(
          "`filename` argument required for tf.lookup.TextFileInitializer")

    self._filename_arg = filename
    key_dtype = dtypes.as_dtype(key_dtype)
    value_dtype = dtypes.as_dtype(value_dtype)

    # Validate the key column selector against the declared key dtype.
    if key_index < -2:
      raise ValueError(f"`key_index` should be >= -2, received: {key_index}.")

    if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
      raise ValueError("`key_dtype` must be int64 if `key_index` is "
                       f"{TextFileIndex.LINE_NUMBER}, received: {key_dtype}")
    if ((key_index == TextFileIndex.WHOLE_LINE) and
        (not key_dtype.is_integer) and (key_dtype != dtypes.string)):
      raise ValueError(
          "`key_dtype` should be either integer or string for `key_index` "
          f"{TextFileIndex.WHOLE_LINE}, received: {key_dtype}")
    # Same validation for the value column selector.
    if value_index < -2:
      raise ValueError("`value_index` should be >= -2, received: "
                       f"{value_index}")

    if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
      raise ValueError("`value_dtype` must be int64 for `value_index` "
                       f"{TextFileIndex.LINE_NUMBER}, received: {value_dtype}")
    if ((value_index == TextFileIndex.WHOLE_LINE) and
        (not value_dtype.is_integer) and (value_dtype != dtypes.string)):
      raise ValueError(
          "`value_dtype` should be either integer or string for `value_index` "
          f"{TextFileIndex.WHOLE_LINE}, received: {value_dtype}")

    if (vocab_size is not None) and (vocab_size <= 0):
      raise ValueError(f"`vocab_size` should be > 0, received: {vocab_size}")

    self._key_index = key_index
    self._value_index = value_index
    self._vocab_size = vocab_size
    self._delimiter = delimiter
    self._name = name
    # Track the file as an asset so it travels with SavedModels.
    self._filename = self._track_trackable(
        asset.Asset(filename), "_filename")
    self._offset = value_index_offset

    super().__init__(key_dtype, value_dtype)

  def initialize(self, table):
    """Initializes the table from a text file.

    Args:
      table: The table to be initialized.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init", (table.resource_handle,)):
      filename = ops.convert_to_tensor(
          self._filename, dtypes.string, name="asset_filepath")
      init_op = gen_lookup_ops.initialize_table_from_text_file_v2(
          table.resource_handle, filename, self._key_index, self._value_index,
          -1 if self._vocab_size is None else self._vocab_size, self._delimiter,
          self._offset)
      ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
      # If the filename tensor is anything other than a string constant (e.g.,
      # if it is a placeholder) then it does not make sense to track it as an
      # asset.
      if not context.executing_eagerly() and constant_op.is_constant(filename):
        ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op

  @property
  def _shared_name(self):
    # The shared name encodes filename, (optional) vocab size, both column
    # selectors and the (optional) offset, so identical initializers share a
    # table resource.
    if self._vocab_size:
      shared_name = "hash_table_%s_%d_%s_%s" % (
          self._filename_arg, self._vocab_size, self._key_index,
          self._value_index)
    else:
      shared_name = "hash_table_%s_%s_%s" % (
          self._filename_arg, self._key_index, self._value_index)
    if self._offset:
      shared_name += "_%s" % self._offset
    return shared_name
The filename may be a scalar `Tensor`. + key_column_index: The column index from the text file to get the keys + from. The default is to use the line number, starting from zero. + value_column_index: The column index from the text file to get the values + from. The default is to use the whole line content. + vocab_size: The number of elements in the file, if known. + delimiter: The delimiter to separate fields in a line. + name: Optional name for the op. + + Raises: + TypeError: when the filename is empty, or when the table key and value + data types do not match the expected data types. + """ + super(TextFileStringTableInitializer, self).__init__( + filename, + dtypes.int64, + key_column_index, + dtypes.string, + value_column_index, + vocab_size=vocab_size, + delimiter=delimiter, + name=name) + + +class TextFileIdTableInitializer(TextFileInitializer): + """Table initializer for string to `int64` IDs tables from a text file.""" + + def __init__(self, + filename, + key_column_index=TextFileIndex.WHOLE_LINE, + value_column_index=TextFileIndex.LINE_NUMBER, + vocab_size=None, + delimiter="\t", + name="text_file_id_table_init", + key_dtype=dtypes.string): + """Constructs an initializer for an string-to-id table from a text file. + + It populates a table that its key and value types are string and int64, + respectively. It generates one key-value pair per line. + The content of the key and value are specified by the key_index + and value_index. + + - TextFileIndex.LINE_NUMBER means use the line number starting from zero, + expects data type int64. + - TextFileIndex.WHOLE_LINE means use the whole line content, expects data + type string. + - A value >=0 means use the index (starting at zero) of the split line based + on `delimiter`. + + Args: + filename: The filename of the text file to be used for initialization. The + path must be accessible from wherever the graph is initialized (eg. + trainer or eval workers). The filename may be a scalar `Tensor`. 
+ key_column_index: The column index from the text file to get the `key` + values from. The default is to use the whole line content. + value_column_index: The column index from the text file to get the `value` + values from. The default is to use the line number, starting from zero. + vocab_size: The number of elements in the file, if known. + delimiter: The delimiter to separate fields in a line. + name: Optional name for the op. + key_dtype: The `key` data type. + + Raises: + TypeError: when the filename is empty, or when the table key and value + data types do not match the expected data types. + """ + super(TextFileIdTableInitializer, self).__init__( + filename, + key_dtype, + key_column_index, + dtypes.int64, + value_column_index, + vocab_size=vocab_size, + delimiter=delimiter, + name=name) + + +class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])): + """A structure for the spec of the hashing function to use for hash buckets. + + `hasher` is the name of the hashing function to use (eg. "fasthash", + "stronghash"). + `key` is optional and specify the key to use for the hash function if + supported, currently only used by a strong hash. + + Fields: + hasher: The hasher name to use. + key: The key to be used by the hashing function, if required. + """ + __slots__ = () + + +FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name + + +class StrongHashSpec(HasherSpec): + """A structure to specify a key of the strong keyed hash spec. + + The strong hash requires a `key`, which is a list of 2 unsigned integer + numbers. These should be non-zero; random numbers generated from random.org + would be a fine choice. + + Fields: + key: The key to be used by the keyed hashing function. 
+ """ + __slots__ = () + + def __new__(cls, key): + if len(key) != 2: + raise ValueError(f"`key` must have size 2, received {len(key)}") + + if not isinstance(key[0], compat_util.integral_types) or not isinstance( + key[1], compat_util.integral_types): + raise TypeError("Invalid key %s. Must be unsigned integer values." % key) + + return super(cls, StrongHashSpec).__new__(cls, "stronghash", key) + + +def _as_string(tensor): + if dtypes.string == tensor.dtype.base_dtype: + return tensor + return string_ops.as_string(tensor) + + +class IdTableWithHashBuckets(LookupInterface): + r"""String to Id table wrapper that assigns out-of-vocabulary keys to buckets. + + For example, if an instance of `IdTableWithHashBuckets` is initialized with a + string-to-id table that maps: + + * `emerson -> 0` + * `lake -> 1` + * `palmer -> 2` + + The `IdTableWithHashBuckets` object will performs the following mapping: + + * `emerson -> 0` + * `lake -> 1` + * `palmer -> 2` + * ` -> bucket_id`, where bucket_id will be between `3` and + `3 + num_oov_buckets - 1`, calculated by: + `hash() % num_oov_buckets + vocab_size` + + If input_tensor is `["emerson", "lake", "palmer", "king", "crimson"]`, + the lookup result is `[0, 1, 2, 4, 7]`. + + If `table` is None, only out-of-vocabulary buckets are used. + + Example usage: + + ```python + num_oov_buckets = 3 + input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"]) + table = tf.IdTableWithHashBuckets( + tf.StaticHashTable( + tf.lookup.TextFileInitializer( + filename, + key_dtype=tf.string, + key_index=tf.lookup.TextFileIndex.WHOLE_LINE, + value_dtype=tf.int64, + value_index=tf.lookup.TextFileIndex.LINE_NUMBER, + delimiter="\t"), + default_value), + num_oov_buckets) + out = table.lookup(input_tensor). + table.init.run() + print(out.eval()) + ``` + + The hash function used for generating out-of-vocabulary buckets ID is handled + by `hasher_spec`. 
+ """ + + def __init__(self, + table, + num_oov_buckets, + hasher_spec=FastHashSpec, + name=None, + key_dtype=None): + """Construct a `IdTableWithHashBuckets` object. + + Args: + table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids. + num_oov_buckets: Number of buckets to use for out-of-vocabulary keys. + hasher_spec: A `HasherSpec` to specify the hash function to use for + assignation of out-of-vocabulary buckets (optional). + name: A name for the operation (optional). + key_dtype: Data type of keys passed to `lookup`. Defaults to + `table.key_dtype` if `table` is specified, otherwise `tf.string`. Must + be string or integer, and must be castable to `table.key_dtype`. + + Raises: + ValueError: when `table` in None and `num_oov_buckets` is not positive. + TypeError: when `hasher_spec` is invalid. + """ + # If a name ends with a '/' it is a "name scope", remove all trailing '/' + # characters to use as table name. + if name: + name = name.rstrip("/") + if table: + if key_dtype is None: + key_dtype = table.key_dtype + supported_table_key_dtypes = (dtypes.int64, dtypes.string) + if table.key_dtype not in supported_table_key_dtypes: + raise TypeError("Invalid `key_dtype`, expected one of " + f"{supported_table_key_dtypes}, received {key_dtype}.") + if table.key_dtype.is_integer != key_dtype.is_integer: + raise TypeError("Invalid `key dtype`, expected %s but got %s." % + ("integer" if key_dtype.is_integer else "non-integer", + table.key_dtype)) + if table.value_dtype != dtypes.int64: + raise TypeError("Invalid `value_dtype`: expected int64 but got %s." 
% + (table.value_dtype)) + self._table = table + name = name or self._table.name + else: + if num_oov_buckets <= 0: + raise ValueError("`oov_buckets` must be > 0 if no `table` is supplied.") + key_dtype = dtypes.string if key_dtype is None else key_dtype + self._table = None + name = name or "hash_bucket" + if (not key_dtype.is_integer) and (dtypes.string != key_dtype): + raise TypeError("Invalid `key_dtype`, expected integer or string, got " + f"{key_dtype}.") + self._num_oov_buckets = num_oov_buckets + + if not isinstance(hasher_spec, HasherSpec): + raise TypeError("`hasher_spec` must be of type HasherSpec, got " + f"{type(hasher_spec)}.") + self._hasher_spec = hasher_spec + if name: + self._table_name = name.split("/")[-1] + else: + self._table_name = None + super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64) + + def _create_resource(self): + if self._table is not None: + return self._table._create_resource() # pylint: disable=protected-access + return None + + def _initialize(self): + if self._table is not None: + return self._table._initialize() # pylint: disable=protected-access + with ops.name_scope(None, "init"): + return control_flow_ops.no_op() + + @property + def initializer(self): + if self._table is not None: + return self._table._init_op # pylint: disable=protected-access + with ops.name_scope(None, "init"): + return control_flow_ops.no_op() + + @property + @deprecated("2018-12-15", "Use `initializer` instead.") + def init(self): + return self.initializer + + @property + def resource_handle(self): + if self._table is not None: + return self._table.resource_handle + return None + + @property + def name(self): + return self._table_name + + def size(self, name=None): + """Compute the number of elements in this table.""" + with ops.name_scope(name, "%s_Size" % self.name): + if self._table: + tsize = self._table.size() + else: + tsize = ops.convert_to_tensor(0, dtype=dtypes.int64) + return tsize + self._num_oov_buckets + + def 
_get_string_to_hash_bucket_fn(self, hasher_spec): + """Returns the string_to_hash_bucket op to use based on `hasher_spec`.""" + if not isinstance(hasher_spec, HasherSpec): + raise TypeError("`hasher_spec` must be of type HasherSpec, got " + f"{type(hasher_spec)}.") + if hasher_spec.hasher == "fasthash": + return string_ops.string_to_hash_bucket_fast + if hasher_spec.hasher == "legacy": + return string_ops.string_to_hash_bucket + if hasher_spec.hasher == "stronghash": + return functools.partial( + string_ops.string_to_hash_bucket_strong, key=hasher_spec.key) + raise ValueError( + f"Found unknown hasher {hasher_spec.hasher} in `hasher_spec`") + + def lookup(self, keys, name=None): + """Looks up `keys` in the table, outputs the corresponding values. + + It assigns out-of-vocabulary keys to buckets based in their hashes. + + Args: + keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`. + name: Optional name for the op. + + Returns: + A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged, + otherwise a dense `Tensor`. + + Raises: + TypeError: when `keys` doesn't match the table key data type. + """ + if keys.dtype.base_dtype != self._key_dtype: + raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, " + f"received: {keys.dtype}") + values = keys + # TODO(b/296302236): Remove RaggedTensor check by adding ragged + # dispatching. + if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)): + values = keys.values + if self._table and (self._table.key_dtype.base_dtype == dtypes.int64): + values = math_ops.cast(values, dtypes.int64) + + if self._num_oov_buckets == 0: + ids = self._table.lookup(values, name=name) + else: + # TODO(yleon): Consider moving this functionality to its own kernel. 
+ with ops.name_scope(name, "%s_Lookup" % self.name): + str_to_hash_bucket = self._get_string_to_hash_bucket_fn( + self._hasher_spec) + buckets = str_to_hash_bucket( + _as_string(values), + num_buckets=self._num_oov_buckets, + name="hash_bucket") + if self._table: + ids = self._table.lookup(values) + buckets = math_ops.add(buckets, self._table.size()) + is_id_non_default = math_ops.not_equal(ids, self._table.default_value) + ids = array_ops.where_v2(is_id_non_default, ids, buckets) + else: + ids = buckets + if isinstance(keys, sparse_tensor.SparseTensor): + return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape) + # TODO(b/296302236): Remove RaggedTensor check by adding ragged + # dispatching. + elif isinstance(keys, internal.RaggedTensor): + return keys.with_values(ids) + return ids + + +@tf_export("lookup.StaticVocabularyTable", v1=[]) +class StaticVocabularyTable(LookupInterface): + r"""String to Id table that assigns out-of-vocabulary keys to hash buckets. + + For example, if an instance of `StaticVocabularyTable` is initialized with a + string-to-id initializer that maps: + + >>> init = tf.lookup.KeyValueTensorInitializer( + ... keys=tf.constant(['emerson', 'lake', 'palmer']), + ... values=tf.constant([0, 1, 2], dtype=tf.int64)) + >>> table = tf.lookup.StaticVocabularyTable( + ... init, + ... num_oov_buckets=5) + + The `Vocabulary` object will performs the following mapping: + + * `emerson -> 0` + * `lake -> 1` + * `palmer -> 2` + * ` -> bucket_id`, where `bucket_id` will be between `3` and + `3 + num_oov_buckets - 1 = 7`, calculated by: + `hash() % num_oov_buckets + vocab_size` + + If input_tensor is: + + >>> input_tensor = tf.constant(["emerson", "lake", "palmer", + ... "king", "crimson"]) + >>> table[input_tensor].numpy() + array([0, 1, 2, 6, 7]) + + If `initializer` is None, only out-of-vocabulary buckets are used. 
  Example usage:

  >>> num_oov_buckets = 3
  >>> vocab = ["emerson", "lake", "palmer", "crimnson"]
  >>> import tempfile
  >>> f = tempfile.NamedTemporaryFile(delete=False)
  >>> f.write('\n'.join(vocab).encode('utf-8'))
  >>> f.close()

  >>> init = tf.lookup.TextFileInitializer(
  ...     f.name,
  ...     key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
  ...     value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
  >>> table = tf.lookup.StaticVocabularyTable(init, num_oov_buckets)
  >>> table.lookup(tf.constant(["palmer", "crimnson" , "king",
  ...                           "tarkus", "black", "moon"])).numpy()
  array([2, 3, 5, 6, 6, 4])

  The hash function used for generating out-of-vocabulary buckets ID is
  Fingerprint64.

  Note that the out-of-vocabulary bucket IDs always range from the table `size`
  up to `size + num_oov_buckets - 1` regardless of the table values, which could
  cause unexpected collisions:

  >>> init = tf.lookup.KeyValueTensorInitializer(
  ...     keys=tf.constant(["emerson", "lake", "palmer"]),
  ...     values=tf.constant([1, 2, 3], dtype=tf.int64))
  >>> table = tf.lookup.StaticVocabularyTable(
  ...     init,
  ...     num_oov_buckets=1)
  >>> input_tensor = tf.constant(["emerson", "lake", "palmer", "king"])
  >>> table[input_tensor].numpy()
  array([1, 2, 3, 3])
  """

  def __init__(self,
               initializer,
               num_oov_buckets,
               lookup_key_dtype=None,
               name=None,
               experimental_is_anonymous=False):
    """Construct a `StaticVocabularyTable` object.

    Args:
      initializer: A `TableInitializerBase` object that contains the data used
        to initialize the table. If None, then we only use out-of-vocab buckets.
      num_oov_buckets: Number of buckets to use for out-of-vocabulary keys. Must
        be greater than zero. If out-of-vocab buckets are not required, use
        `StaticHashTable` instead.
      lookup_key_dtype: Data type of keys passed to `lookup`. Defaults to
        `initializer.key_dtype` if `initializer` is specified, otherwise
        `tf.string`. Must be string or integer, and must be castable to
        `initializer.key_dtype`.
      name: A name for the operation (optional).
      experimental_is_anonymous: Whether to use anonymous mode for the
        table (default is False). In anonymous mode, the table
        resource can only be accessed via a resource handle. It can't
        be looked up by a name. When all resource handles pointing to
        that resource are gone, the resource will be deleted
        automatically.

    Raises:
      ValueError: when `num_oov_buckets` is not positive.
      TypeError: when lookup_key_dtype or initializer.key_dtype are not
        integer or string. Also when initializer.value_dtype != int64.
    """
    if num_oov_buckets <= 0:
      raise ValueError("`num_oov_buckets` must be > 0; use StaticHashTable.")
    # If a name ends with a '/' it is a "name scope", remove all trailing '/'
    # characters to use as table name.
    if name:
      name = name.rstrip("/")
    if initializer:
      if lookup_key_dtype is None:
        lookup_key_dtype = initializer.key_dtype
      # The backing HashTable only supports int64 or string keys; the
      # (possibly different) lookup dtype is validated separately below.
      supported_table_key_dtypes = (dtypes.int64, dtypes.string)
      if initializer.key_dtype not in supported_table_key_dtypes:
        raise TypeError("Invalid `key_dtype`, expected one of %s, but got %s." %
                        (supported_table_key_dtypes, initializer.key_dtype))
      if initializer.key_dtype.is_integer != lookup_key_dtype.is_integer:
        raise TypeError(
            "Invalid `key_dtype`, expected %s but got %s." %
            ("integer" if lookup_key_dtype.is_integer else "non-integer",
             initializer.key_dtype))
      if initializer.value_dtype != dtypes.int64:
        raise TypeError("Invalid `value_dtype`, expected %s but got %s." %
                        (dtypes.int64, initializer.value_dtype))
      if isinstance(initializer, trackable_base.Trackable):
        self._initializer = self._track_trackable(initializer, "_initializer")
      # In-vocabulary lookups are served by a StaticHashTable; -1 marks a miss
      # so that `lookup` can route misses to the OOV buckets.
      self._table = HashTable(
          initializer,
          default_value=-1,
          experimental_is_anonymous=experimental_is_anonymous)
      name = name or self._table.name
    else:
      # Bucket-only mode: every key hashes straight into an OOV bucket.
      lookup_key_dtype = dtypes.string
      self._table = None
      name = name or "hash_bucket"
    if (not lookup_key_dtype.is_integer) and (dtypes.string !=
                                              lookup_key_dtype):
      raise TypeError("Invalid `key_dtype`, expected integer or string, got "
                      f"{lookup_key_dtype}")
    self._num_oov_buckets = num_oov_buckets

    self._table_name = None
    if name is not None:
      self._table_name = name.split("/")[-1]
    super(StaticVocabularyTable, self).__init__(lookup_key_dtype, dtypes.int64)

  def _create_resource(self):
    # Only the wrapped table owns a resource; bucket-only mode has none.
    if self._table is not None:
      return self._table._create_resource()  # pylint: disable=protected-access
    return None

  def _initialize(self):
    # Bucket-only mode needs no initialization; emit a no-op for graph mode.
    if self._table is not None:
      return self._table._initialize()  # pylint: disable=protected-access
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()

  @property
  def resource_handle(self):
    if self._table is not None:
      return self._table.resource_handle
    return None

  @property
  def name(self):
    return self._table_name

  def size(self, name=None):
    """Compute the number of elements in this table."""
    with ops.name_scope(name, "%s_Size" % self.name):
      if self._table:
        tsize = self._table.size()
      else:
        tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
      # OOV bucket IDs live directly after the vocabulary, so they count
      # toward the table size.
      return tsize + self._num_oov_buckets

  def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.

    It assigns out-of-vocabulary keys to buckets based on their hashes.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: Optional name for the op.

    Returns:
      A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,
      otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype.base_dtype != self._key_dtype:
      raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, "
                      f"received: {keys.dtype}")
    values = keys
    # TODO(b/296302236): Remove RaggedTensor check by adding ragged
    # dispatching.
    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):
      # Operate on the flat values; the composite structure is rebuilt below.
      values = keys.values
    if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
      values = math_ops.cast(values, dtypes.int64)

    # TODO(yleon): Consider moving this functionality to its own kernel.
    with ops.name_scope(name, "%s_Lookup" % self.name):
      # Hash every key into [0, num_oov_buckets); integer keys are stringified
      # first so the same hashing kernel applies.
      buckets = string_ops.string_to_hash_bucket_fast(
          _as_string(values),
          num_buckets=self._num_oov_buckets,
          name="hash_bucket")
      if self._table:
        ids = self._table.lookup(values)
        # Shift bucket IDs past the vocabulary range, then use them only where
        # the table lookup missed (returned its default value).
        buckets = math_ops.add(buckets, self._table.size())
        is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
        ids = array_ops.where_v2(is_id_non_default, ids, buckets)
      else:
        ids = buckets
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    # TODO(b/296302236): Remove RaggedTensor check by adding ragged
    # dispatching.
    elif isinstance(keys, internal.RaggedTensor):
      return keys.with_values(ids)
    return ids


@tf_export(v1=["lookup.StaticVocabularyTable"])
class StaticVocabularyTableV1(StaticVocabularyTable):
  """V1 variant of `StaticVocabularyTable` that exposes an `initializer` op."""

  @property
  def initializer(self):
    # Delegate to the wrapped table's init op; bucket-only tables have nothing
    # to initialize, so return a no-op.
    if self._table is not None:
      return self._table._init_op  # pylint: disable=protected-access
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()


def index_table_from_file(vocabulary_file=None,
                          num_oov_buckets=0,
                          vocab_size=None,
                          default_value=-1,
                          hasher_spec=FastHashSpec,
                          key_dtype=dtypes.string,
                          name=None,
                          key_column_index=TextFileIndex.WHOLE_LINE,
                          value_column_index=TextFileIndex.LINE_NUMBER,
                          delimiter="\t"):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the key and the zero-based line
  number is the ID.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `session.run(tf.compat.v1.tables_initializer())` or
  `session.run(table.init())` once.

  To specify multi-column vocabulary files, use key_column_index and
  value_column_index and delimiter.

  - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
    expects data type int64.
  - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
    type string.
  - A value >=0 means use the index (starting at zero) of the split line based
    on `delimiter`.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  table = tf.lookup.index_table_from_file(
      vocabulary_file="test.txt", num_oov_buckets=1)
  ids = table.lookup(features)
  ...
  tf.compat.v1.tables_initializer().run()

  ids.eval()  ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).
    key_column_index: The column index from the text file to get the `key`
      values from. The default is to use the whole line content.
    value_column_index: The column index from the text file to get the `value`
      values from. The default is to use the line number, starting from zero.
    delimiter: The delimiter to separate fields in a line.

  Returns:
    The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_file` is not set.
    ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
      than zero.
  """
  if vocabulary_file is None or (isinstance(vocabulary_file, str) and
                                 not vocabulary_file):
    raise ValueError(
        "`vocabulary_file` must be specified and must not be empty.")
  if num_oov_buckets < 0:
    raise ValueError(
        "num_oov_buckets must be greater or equal than 0, got %d." %
        num_oov_buckets)
  if vocab_size is not None and vocab_size < 1:
    # Report the file name in the error; resolve a Tensor-valued file name to
    # its constant value when possible, "?" otherwise.
    vocab_file_value = vocabulary_file
    if isinstance(vocabulary_file, tensor_lib.Tensor):
      vocab_file_value = tensor_util.constant_value(vocabulary_file) or "?"
    raise ValueError("`vocab_size` must be greater than 0, got %d for "
                     "vocabulary_file: %s." % (vocab_size, vocab_file_value))
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("Dtype for `keys` should be either integer or string.")

  with ops.name_scope(name, "string_to_index"):
    table = None
    with ops.name_scope(None, "hash_table"):
      # Integer keys are widened to int64 to match the table's key dtype.
      init = TextFileIdTableInitializer(
          vocabulary_file,
          vocab_size=vocab_size,
          key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
          name="table_init",
          key_column_index=key_column_index,
          value_column_index=value_column_index,
          delimiter=delimiter)

      table = StaticHashTableV1(init, default_value)
    if num_oov_buckets:
      # Wrap the table so misses are assigned stable hash-bucket IDs.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          key_dtype=key_dtype)

    return table


def index_table_from_tensor(vocabulary_list,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
  tensor where each element is a key and corresponding index within the tensor
  is the value.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`. The bucket ID range is
  `[vocabulary list size, vocabulary list size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `session.run(tf.compat.v1.tables_initializer())` or
  `session.run(table.init())` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
  the table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  table = tf.lookup.index_table_from_tensor(
      vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.compat.v1.tables_initializer().run()

  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`. Only string and integers are
      supported.
    name: A name for this op (optional).

  Returns:
    The lookup table to map an input `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_list` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  if vocabulary_list is None:
    raise ValueError("`vocabulary_list` must be specified.")

  if num_oov_buckets < 0:
    raise ValueError(
        "`num_oov_buckets` must be greater or equal than 0, got %d." %
        num_oov_buckets)

  if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
    raise TypeError("`dtype` must either be integer or string.")

  with ops.name_scope(name, "string_to_index"):
    keys = ops.convert_to_tensor(vocabulary_list)
    # Integer/non-integer mismatch between the vocabulary and the requested
    # lookup dtype is never castable, so reject it up front.
    if keys.dtype.is_integer != dtype.is_integer:
      raise ValueError(
          "Invalid `dtype`: Expected %s, got %s." %
          ("integer" if dtype.is_integer else "non-integer", keys.dtype))
    if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
      raise ValueError("Invalid `dtype`: Expected %s, got %s." %
                       (dtype, keys.dtype))
    num_elements = array_ops.size(keys)
    # Each vocabulary entry maps to its position: values = [0, n).
    values = math_ops.cast(math_ops.range(num_elements), dtypes.int64)

    with ops.name_scope(None, "hash_table"):
      # Integer keys are widened to int64 for the backing table.
      table_keys = math_ops.cast(
          keys, dtypes.int64) if keys.dtype.is_integer else keys
      init = KeyValueTensorInitializer(
          table_keys,
          values,
          table_keys.dtype.base_dtype,
          dtypes.int64,
          name="table_init")
      table = StaticHashTableV1(init, default_value)
    if num_oov_buckets:
      # Wrap the table so misses are assigned stable hash-bucket IDs.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          key_dtype=dtype)
    return table


def index_to_string_table_from_file(vocabulary_file,
                                    vocab_size=None,
                                    default_value="UNK",
                                    name=None,
                                    key_column_index=TextFileIndex.LINE_NUMBER,
                                    value_column_index=TextFileIndex.WHOLE_LINE,
                                    delimiter="\t"):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The table is initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the value and the
  zero-based line number is the index.

  Any input which does not have a corresponding index in the vocabulary file
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `session.run(tf.compat.v1.tables_initializer())` or
  `session.run(table.init())` once.

  To specify multi-column vocabulary files, use key_column_index and
  value_column_index and delimiter.

  - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
    expects data type int64.
  - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
    type string.
  - A value >=0 means use the index (starting at zero) of the split line based
    on `delimiter`.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  indices = tf.constant([1, 5], tf.int64)
  table = tf.lookup.index_to_string_table_from_file(
      vocabulary_file="test.txt", default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.compat.v1.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).
    key_column_index: The column index from the text file to get the `key`
      values from. The default is to use the line number, starting from zero.
    value_column_index: The column index from the text file to get the `value`
      values from. The default is to use the whole line content.
    delimiter: The delimiter to separate fields in a line.

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_file` is empty.
    ValueError: when `vocab_size` is invalid.
  """
  if vocabulary_file is None or (isinstance(vocabulary_file, str) and
                                 not vocabulary_file):
    raise ValueError(
        "`vocabulary_file` must be specified and must not be empty.")

  if vocab_size is not None and vocab_size < 1:
    raise ValueError(f"`vocab_size` must be greater than 0, got {vocab_size}.")

  with ops.name_scope(name, "index_to_string"):
    # Keys default to line numbers and values to line content — the reverse of
    # `index_table_from_file`.
    init = TextFileStringTableInitializer(
        vocabulary_file,
        vocab_size=vocab_size,
        name="table_init",
        key_column_index=key_column_index,
        value_column_index=value_column_index,
        delimiter=delimiter)

    # TODO(yleon): Use a more efficient structure.
    return StaticHashTableV1(init, default_value)


def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within the
  tensor is the key.

  Any input which does not have a corresponding index in 'vocabulary_list'
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `session.run(tf.compat.v1.tables_initializer())` or
  `session.run(table.init())` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
  the table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.compat.v1.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_list` is not set.
+ """ + + if vocabulary_list is None: + raise ValueError("`vocabulary_list` argument must be specified.") + + with ops.name_scope(name, "index_to_string"): + vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string) + num_elements = array_ops.size(vocabulary_list) + keys = math_ops.cast(math_ops.range(num_elements), dtypes.int64) + + init = KeyValueTensorInitializer( + keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init") + # TODO(yleon): Use a more efficient structure. + return StaticHashTableV1(init, default_value) + + +@tf_export("lookup.experimental.MutableHashTable") +@saveable_compat.legacy_saveable_name("table") +class MutableHashTable(LookupInterface): + """A generic mutable hash table implementation. + + Data can be inserted by calling the `insert` method and removed by calling the + `remove` method. It does not support initialization via the init method. + + `MutableHashTable` requires additional memory during checkpointing and restore + operations to create temporary key and value tensors. + + Example usage: + + >>> table = tf.lookup.experimental.MutableHashTable(key_dtype=tf.string, + ... value_dtype=tf.int64, + ... default_value=-1) + >>> keys_tensor = tf.constant(['a', 'b', 'c']) + >>> vals_tensor = tf.constant([7, 8, 9], dtype=tf.int64) + >>> input_tensor = tf.constant(['a', 'f']) + >>> table.insert(keys_tensor, vals_tensor) + >>> table.lookup(input_tensor).numpy() + array([ 7, -1]) + >>> table.remove(tf.constant(['c'])) + >>> table.lookup(keys_tensor).numpy() + array([ 7, 8, -1]) + >>> sorted(table.export()[0].numpy()) + [b'a', b'b'] + >>> sorted(table.export()[1].numpy()) + [7, 8] + """ + + def __init__(self, + key_dtype, + value_dtype, + default_value, + name="MutableHashTable", + checkpoint=True, + experimental_is_anonymous=False): + """Creates an empty `MutableHashTable` object. + + Creates a table, the type of its keys and values are specified by key_dtype + and value_dtype, respectively. 
+ + Args: + key_dtype: the type of the key tensors. + value_dtype: the type of the value tensors. + default_value: The value to use if a key is missing in the table. + name: A name for the operation (optional). + checkpoint: if True, the contents of the table are saved to and restored + from checkpoints. If `shared_name` is empty for a checkpointed table, it + is shared using the table node name. + experimental_is_anonymous: Whether to use anonymous mode for the + table (default is False). In anonymous mode, the table + resource can only be accessed via a resource handle. It can't + be looked up by a name. When all resource handles pointing to + that resource are gone, the resource will be deleted + automatically. + + Returns: + A `MutableHashTable` object. + + Raises: + ValueError: If checkpoint is True and no name was specified. + """ + self._default_value = ops.convert_to_tensor( + default_value, dtype=value_dtype) + self._value_shape = self._default_value.get_shape() + self._checkpoint = checkpoint + self._key_dtype = key_dtype + self._value_dtype = value_dtype + self._name = name + self._is_anonymous = experimental_is_anonymous + if not self._is_anonymous: + self._shared_name = None + if context.executing_eagerly(): + # TODO(allenl): This will leak memory due to kernel caching by + # the shared_name attribute value (but is better than the + # alternative of sharing everything by default when executing + # eagerly; hopefully creating tables in a loop is uncommon). 
+ self._shared_name = "table_%d" % (ops.uid(),) + super(MutableHashTable, self).__init__(key_dtype, value_dtype) + self._resource_handle = self._create_resource() + if checkpoint: + saveable = MutableHashTable._Saveable(self, name) + if not context.executing_eagerly(): + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + + def _create_resource(self): + if self._is_anonymous: + if self._default_value.get_shape().ndims == 0: + table_ref = gen_lookup_ops.anonymous_mutable_hash_table( + key_dtype=self._key_dtype, + value_dtype=self._value_dtype, + name=self._name) + else: + table_ref = gen_lookup_ops.anonymous_mutable_hash_table_of_tensors( + key_dtype=self._key_dtype, + value_dtype=self._value_dtype, + value_shape=self._default_value.get_shape(), + name=self._name) + else: + # The table must be shared if checkpointing is requested for multi-worker + # training to work correctly. Use the node name if no shared_name has been + # explicitly specified. + use_node_name_sharing = self._checkpoint and self._shared_name is None + if self._default_value.get_shape().ndims == 0: + table_ref = gen_lookup_ops.mutable_hash_table_v2( + shared_name=self._shared_name, + use_node_name_sharing=use_node_name_sharing, + key_dtype=self._key_dtype, + value_dtype=self._value_dtype, + name=self._name) + else: + table_ref = gen_lookup_ops.mutable_hash_table_of_tensors_v2( + shared_name=self._shared_name, + use_node_name_sharing=use_node_name_sharing, + key_dtype=self._key_dtype, + value_dtype=self._value_dtype, + value_shape=self._default_value.get_shape(), + name=self._name) + + if context.executing_eagerly(): + self._table_name = None + else: + self._table_name = table_ref.op.name.split("/")[-1] + return table_ref + + @property + def name(self): + return self._table_name + + def size(self, name=None): + """Compute the number of elements in this table. + + Args: + name: A name for the operation (optional). 

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]):
      with ops.colocate_with(self.resource_handle):
        return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)

  def remove(self, keys, name=None):
    """Removes `keys` and its associated values from the table.

    If a key is not present in the table, it is silently ignored.

    Args:
      keys: Keys to remove. Can be a tensor of any shape. Must match the table's
        key type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.

    Raises:
      TypeError: when `keys` do not match the table data types.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, "
                      f"received: {keys.dtype}")

    with ops.name_scope(name, "%s_lookup_table_remove" % self.name,
                        (self.resource_handle, keys, self._default_value)):
      op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)

    return op

  def lookup(self, keys, dynamic_default_values=None, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. Can be a tensor of any shape. Must match the
        table's key_dtype.
      dynamic_default_values: The values to use if a key is missing in the
        table. If None (by default), the `table.default_value` will be used.
        Shape of `dynamic_default_values` must be same with
        `table.default_value` or the lookup result tensor.
        In the latter case, each key will have a different default value.

        For example:

        ```python
        keys = [0, 1, 3]
        dynamic_default_values = [[1, 3, 4], [2, 3, 9], [8, 3, 0]]

        # The key '0' will use [1, 3, 4] as default value.
        # The key '1' will use [2, 3, 9] as default value.
        # The key '3' will use [8, 3, 0] as default value.
        ```

      name: A name for the operation (optional).

    Returns:
      A tensor containing the values in the same shape as `keys` using the
      table's value type.

    Raises:
      TypeError: when `keys` do not match the table data types.
    """
    with ops.name_scope(name, "%s_lookup_table_find" % self.name,
                        (self.resource_handle, keys, self._default_value)):
      keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
      with ops.colocate_with(self.resource_handle):
        # Per-key defaults (if given) take precedence over the table default.
        values = gen_lookup_ops.lookup_table_find_v2(
            self.resource_handle, keys, dynamic_default_values
            if dynamic_default_values is not None else self._default_value)
    return values

  def insert(self, keys, values, name=None):
    """Associates `keys` with `values`.

    Args:
      keys: Keys to insert. Can be a tensor of any shape. Must match the table's
        key type.
      values: Values to be associated with keys. Must be a tensor of the same
        shape as `keys` and match the table's value type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.

    Raises:
      TypeError: when `keys` or `values` doesn't match the table data
        types.
    """
    with ops.name_scope(name, "%s_lookup_table_insert" % self.name,
                        [self.resource_handle, keys, values]):
      keys = ops.convert_to_tensor(keys, self._key_dtype, name="keys")
      values = ops.convert_to_tensor(values, self._value_dtype, name="values")
      with ops.colocate_with(self.resource_handle):
        # pylint: disable=protected-access
        op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys,
                                                   values)
    return op

  def export(self, name=None):
    """Returns tensors of all keys and values in the table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A pair of tensors with the first tensor containing all keys and the
      second tensors containing all values in the table.
    """
    with ops.name_scope(name, "%s_lookup_table_export_values" % self.name,
                        [self.resource_handle]):
      with ops.colocate_with(self.resource_handle):
        exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
            self.resource_handle, self._key_dtype, self._value_dtype)
    return exported_keys, exported_values

  def _serialize_to_tensors(self):
    """Implements checkpointing protocols for `Trackable`."""
    tensors = self.export()
    # The "-keys"/"-values" suffixes must match `_restore_from_tensors` and
    # the legacy `_Saveable` spec names below.
    return {"-keys": tensors[0], "-values": tensors[1]}

  def _restore_from_tensors(self, restored_tensors):
    """Implements checkpointing protocols for `Trackable`."""
    with ops.name_scope("%s_table_restore" % self._name):
      with ops.colocate_with(self.resource_handle):
        return gen_lookup_ops.lookup_table_import_v2(
            self.resource_handle,
            restored_tensors["-keys"],
            restored_tensors["-values"])

  def _copy_trackable_to_cpu(self, object_map):
    """Implements checkpointing protocols for `Trackable`."""
    if self not in object_map:
      # If self is not already populated in object map, instantiate the copy
      object_map[self] = MutableHashTable(
          self._key_dtype,
          self._value_dtype,
          self._default_value,
          self._name,
          self._checkpoint,
          self._is_anonymous
      )

    # Copy values from `self` to copy of `self`
    serialized = self._serialize_to_tensors()
    object_map[self]._restore_from_tensors(serialized)  # pylint: disable=protected-access

  # This class is needed for `MutableHashTable(checkpoint=True)`.
  class _Saveable(BaseSaverBuilder.SaveableObject):
    """SaveableObject implementation for DenseHashTable."""

    def __init__(self, table, name, table_name=None):
      tensors = table.export()
      specs = [
          BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
          BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
      ]
      self.table_name = table_name or name
      # pylint: disable=protected-access
      super(MutableHashTable._Saveable, self).__init__(table, specs, name)

    def restore(self, restored_tensors, restored_shapes):
      del restored_shapes  # unused
      # pylint: disable=protected-access
      with ops.name_scope("%s_table_restore" % self.table_name):
        with ops.colocate_with(self.op.resource_handle):
          return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle,
                                                       restored_tensors[0],
                                                       restored_tensors[1])


@tf_export("lookup.experimental.DenseHashTable")
@saveable_compat.legacy_saveable_name("table")
class DenseHashTable(LookupInterface):
  """A mutable hash table with faster lookups and higher memory usage.

  Data can be inserted by calling the `insert` method and removed by calling the
  `remove` method. It does not support initialization via the init method.

  Compared to `MutableHashTable`, `DenseHashTable` offers generally faster
  `insert`, `remove` and `lookup` operations, in exchange for a higher overall
  memory footprint.

  It uses "open addressing" with quadratic reprobing to resolve collisions. This
  requires specifying two keys in the key space, `empty_key` and `deleted_key`,
  that can never inserted into the table.

  Unlike `MutableHashTable`, `DenseHashTable` does not require additional memory
  for temporary tensors created during checkpointing and restore operations.

  Example usage:

  >>> table = tf.lookup.experimental.DenseHashTable(
  ...     key_dtype=tf.string,
  ...     value_dtype=tf.int64,
  ...     default_value=-1,
  ...     empty_key='',
  ...
deleted_key='$') + >>> keys = tf.constant(['a', 'b', 'c']) + >>> values = tf.constant([0, 1, 2], dtype=tf.int64) + >>> table.insert(keys, values) + >>> table.remove(tf.constant(['c'])) + >>> table.lookup(tf.constant(['a', 'b', 'c','d'])).numpy() + array([ 0, 1, -1, -1]) + """ + + # TODO(andreasst): consider extracting common code with MutableHashTable into + # a common superclass. + def __init__(self, + key_dtype, + value_dtype, + default_value, + empty_key, + deleted_key, + initial_num_buckets=None, + name="MutableDenseHashTable", + checkpoint=True, + experimental_is_anonymous=False): + """Creates an empty `DenseHashTable` object. + + Creates a table, the type of its keys and values are specified by key_dtype + and value_dtype, respectively. + + Args: + key_dtype: the type of the key tensors. + value_dtype: the type of the value tensors. + default_value: The value to use if a key is missing in the table. + empty_key: the key to use to represent empty buckets internally. Must not + be used in insert, remove or lookup operations. + deleted_key: the key to use to represent deleted buckets internally. Must + not be used in insert, remove or lookup operations and be different from + the empty_key. + initial_num_buckets: the initial number of buckets (optional, + default to 2^17=131072). Note that the default value is + relatively large (~1MB), so if you are going to create many + tables (likely the case when `experimental_is_anonymous` is + `True`), you should set `initial_num_buckets` to a smaller + value to reduce memory usage. + name: A name for the operation (optional). + checkpoint: if True, the contents of the table are saved to and restored + from checkpoints. If `shared_name` is empty for a checkpointed table, it + is shared using the table node name. + experimental_is_anonymous: Whether to use anonymous mode for the + table (default is False). In anonymous mode, the table + resource can only be accessed via a resource handle. It can't + be looked up by a name. 
When all resource handles pointing to + that resource are gone, the resource will be deleted + automatically. + + Returns: + A `DenseHashTable` object. + + Raises: + ValueError: If checkpoint is True and no name was specified. + """ + self._default_value = ops.convert_to_tensor( + default_value, dtype=value_dtype, name="default_value") + self._key_dtype = key_dtype + self._value_dtype = value_dtype + # TODO(b/201578996): Pick a good default for initial_num_buckets + # other than 2^17. + self._initial_num_buckets = initial_num_buckets + self._value_shape = self._default_value.get_shape() + self._checkpoint = checkpoint + self._name = name + self._empty_key = empty_key + self._deleted_key = deleted_key + self._is_anonymous = experimental_is_anonymous + if not self._is_anonymous: + self._shared_name = None + if context.executing_eagerly(): + # TODO(allenl): This will leak memory due to kernel caching by + # the shared_name attribute value (but is better than the + # alternative of sharing everything by default when executing + # eagerly; hopefully creating tables in a loop is uncommon). 
+ self._shared_name = "table_%d" % (ops.uid(),) + super(DenseHashTable, self).__init__(key_dtype, value_dtype) + self._resource_handle = self._create_resource() + if checkpoint: + saveable = DenseHashTable._Saveable(self, name) + if not context.executing_eagerly(): + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + + def _create_resource(self): + empty_key = ops.convert_to_tensor( + self._empty_key, dtype=self._key_dtype, name="empty_key") + deleted_key = ops.convert_to_tensor( + self._deleted_key, dtype=self._key_dtype, name="deleted_key") + if self._is_anonymous: + table_ref = gen_lookup_ops.anonymous_mutable_dense_hash_table( + empty_key=empty_key, + deleted_key=deleted_key, + value_dtype=self._value_dtype, + value_shape=self._value_shape, + initial_num_buckets=self._initial_num_buckets, + name=self._name) + else: + # The table must be shared if checkpointing is requested for multi-worker + # training to work correctly. Use the node name if no shared_name has been + # explicitly specified. + use_node_name_sharing = self._checkpoint and self._shared_name is None + table_ref = gen_lookup_ops.mutable_dense_hash_table_v2( + empty_key=empty_key, + deleted_key=deleted_key, + shared_name=self._shared_name, + use_node_name_sharing=use_node_name_sharing, + value_dtype=self._value_dtype, + value_shape=self._value_shape, + initial_num_buckets=self._initial_num_buckets, + name=self._name) + if context.executing_eagerly(): + self._table_name = None + else: + self._table_name = table_ref.op.name.split("/")[-1] + return table_ref + + @property + def name(self): + return self._table_name + + def size(self, name=None): + """Compute the number of elements in this table. + + Args: + name: A name for the operation (optional). + + Returns: + A scalar tensor containing the number of elements in this table. 
+ """ + with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]): + with ops.colocate_with(self.resource_handle): + return gen_lookup_ops.lookup_table_size_v2(self.resource_handle) + + def lookup(self, keys, name=None): + """Looks up `keys` in a table, outputs the corresponding values. + + The `default_value` is used for keys not present in the table. + + Args: + keys: Keys to look up. Can be a tensor of any shape. Must match the + table's key_dtype. + name: A name for the operation (optional). + + Returns: + A tensor containing the values in the same shape as `keys` using the + table's value type. + + Raises: + TypeError: when `keys` do not match the table data types. + """ + with ops.name_scope(name, "%s_lookup_table_find" % self.name, + [self.resource_handle, keys]): + keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys") + with ops.colocate_with(self.resource_handle): + values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys, + self._default_value) + + return values + + def insert_or_assign(self, keys, values, name=None): + """Associates `keys` with `values`. + + Args: + keys: Keys to insert. Can be a tensor of any shape. Must match the table's + key type. + values: Values to be associated with keys. Must be a tensor of the same + shape as `keys` and match the table's value type. + name: A name for the operation (optional). + + Returns: + The created Operation. + + Raises: + TypeError: when `keys` or `values` doesn't match the table data + types. 
+ """ + with ops.name_scope(name, "%s_lookup_table_insert" % self.name, + [self.resource_handle, keys, values]): + keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys") + values = ops.convert_to_tensor( + values, dtype=self._value_dtype, name="values") + with ops.colocate_with(self.resource_handle): + op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, + values) + return op + + def insert(self, keys, values, name=None): + """Associates `keys` with `values`. + + Args: + keys: Keys to insert. Can be a tensor of any shape. Must match the table's + key type. + values: Values to be associated with keys. Must be a tensor of the same + shape as `keys` and match the table's value type. + name: A name for the operation (optional). + + Returns: + The created Operation. + + Raises: + TypeError: when `keys` or `values` doesn't match the table data + types. + """ + return self.insert_or_assign(keys, values, name) + + def erase(self, keys, name=None): + """Removes `keys` and its associated values from the table. + + If a key is not present in the table, it is silently ignored. + + Args: + keys: Keys to remove. Can be a tensor of any shape. Must match the table's + key type. + name: A name for the operation (optional). + + Returns: + The created Operation. + + Raises: + TypeError: when `keys` do not match the table data types. + """ + if keys.dtype != self._key_dtype: + raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." % + (self._key_dtype, keys.dtype)) + + with ops.name_scope(name, "%s_lookup_table_remove" % self.name, + (self.resource_handle, keys, self._default_value)): + # pylint: disable=protected-access + op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys) + + return op + + def remove(self, keys, name=None): + """Removes `keys` and its associated values from the table. + + If a key is not present in the table, it is silently ignored. + + Args: + keys: Keys to remove. Can be a tensor of any shape. 
Must match the table's + key type. + name: A name for the operation (optional). + + Returns: + The created Operation. + + Raises: + TypeError: when `keys` do not match the table data types. + """ + return self.erase(keys, name) + + def export(self, name=None): + """Returns tensors of all keys and values in the table. + + Args: + name: A name for the operation (optional). + + Returns: + A pair of tensors with the first tensor containing all keys and the + second tensors containing all values in the table. + """ + with ops.name_scope(name, "%s_lookup_table_export_values" % self.name, + [self.resource_handle]): + with ops.colocate_with(self.resource_handle): + exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2( + self.resource_handle, self._key_dtype, self._value_dtype) + + return exported_keys, exported_values + + def _serialize_to_tensors(self): + """Implements checkpointing interface in `Trackable`.""" + tensors = self.export() + return {"-keys": tensors[0], "-values": tensors[1]} + + def _restore_from_tensors(self, restored_tensors): + """Implements checkpointing interface in `Trackable`.""" + with ops.name_scope("%s_table_restore" % self._name): + with ops.colocate_with(self.resource_handle): + return gen_lookup_ops.lookup_table_import_v2( + self.resource_handle, + restored_tensors["-keys"], + restored_tensors["-values"]) + + def _copy_trackable_to_cpu(self, object_map): + """Implements checkpointing protocols for `Trackable`.""" + if self not in object_map: + # If self is not already populated in object map, instantiate the copy + object_map[self] = DenseHashTable( + self._key_dtype, + self._value_dtype, + self._default_value, + self._empty_key, + self._deleted_key, + self._initial_num_buckets, + self._name, + self._checkpoint, + self._is_anonymous + ) + + # Copy values from `self` to copy of `self` + serialized = self._serialize_to_tensors() + object_map[self]._restore_from_tensors(serialized) # pylint: disable=protected-access + + # This 
class is needed for `DenseHashTable(checkpoint=True)`. + class _Saveable(BaseSaverBuilder.SaveableObject): + """SaveableObject implementation for DenseHashTable.""" + + def __init__(self, table, name, table_name=None): + tensors = table.export() + specs = [ + BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"), + BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values") + ] + self.table_name = table_name or name + # pylint: disable=protected-access + super(DenseHashTable._Saveable, self).__init__(table, specs, name) + + def restore(self, restored_tensors, restored_shapes): + del restored_shapes # unused + # pylint: disable=protected-access + with ops.name_scope("%s_table_restore" % self.table_name): + with ops.colocate_with(self.op.resource_handle): + return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle, + restored_tensors[0], + restored_tensors[1]) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..29c695c0da2a4076a6bad167ccead7213a8b5f80 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_ops.py @@ -0,0 +1,5672 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Math Operations. 
+
+Note: Functions taking `Tensor` arguments can also take anything accepted by
+`tf.convert_to_tensor`.
+
+Note: Elementwise binary operations in TensorFlow follow [numpy-style
+broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
+
+TensorFlow provides a variety of math functions including:
+
+* Basic arithmetic operators and trigonometric functions.
+* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
+* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
+* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
+* Segment functions (like: `tf.math.segment_sum`)
+
+See: `tf.linalg` for matrix and tensor functions.
+
+
+
+## About Segmentation
+
+TensorFlow provides several operations that you can use to perform common
+math computations on tensor segments.
+Here a segmentation is a partitioning of a tensor along
+the first dimension, i.e. it defines a mapping from the first dimension onto
+`segment_ids`. The `segment_ids` tensor should be the size of
+the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
+where `k<d0`.
+In particular, a segmentation of a matrix tensor is a mapping of rows to
+segment ids.
+
+For example:
+
+```python
+c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+tf.math.segment_sum(c, tf.constant([0, 0, 1]))
+# ==> [[0 0 0 0]
+# [5 6 7 8]]
+```
+
+The standard `segment_*` functions assert that the segment indices are sorted.
+If you have unsorted indices use the equivalent `unsorted_segment_` function.
+These functions take an additional argument `num_segments` so that the output
+tensor can be efficiently allocated.
+ +``` python +c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) +tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) +# ==> [[ 6, 8, 10, 12], +# [-1, -2, -3, -4]] +``` + +API docstring: tensorflow.math +""" +import builtins +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import override_binary_operator +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_conversion_registry +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_bitwise_ops +from tensorflow.python.ops import gen_data_flow_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_nn_ops +from tensorflow.python.ops import gen_sparse_ops +from tensorflow.python.ops import tensor_math_operator_overrides # pylint: disable=unused-import +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_math_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util import compat +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc +from tensorflow.python.util.tf_export import tf_export + + +# Aliases for some automatically-generated names. 
+nextafter = gen_math_ops.next_after + + +@tf_export("linspace", v1=["lin_space", "linspace"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("lin_space") +def linspace_nd(start, stop, num, name=None, axis=0): + r"""Generates evenly-spaced values in an interval along a given axis. + + A sequence of `num` evenly-spaced values are generated beginning at `start` + along a given `axis`. + If `num > 1`, the values in the sequence increase by + `(stop - start) / (num - 1)`, so that the last one is exactly `stop`. + If `num <= 0`, `ValueError` is raised. + + Matches + [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s + behaviour + except when `num == 0`. + + For example: + + ``` + tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] + ``` + + `Start` and `stop` can be tensors of arbitrary size: + + >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0) + + + `Axis` is where the values will be generated (the dimension in the + returned tensor which corresponds to the axis will be equal to `num`) + + >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1) + + + + + Args: + start: A `Tensor`. Must be one of the following types: `bfloat16`, + `float32`, `float64`. N-D tensor. First entry in the range. + stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor. + Last entry in the range. + num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D + tensor. Number of values to generate. + name: A name for the operation (optional). + axis: Axis along which the operation is performed (used only when N-D + tensors are provided). + + Returns: + A `Tensor`. Has the same type as `start`. 
+ """ + + with ops.name_scope(name, "linspace", [start, stop]): + start = ops.convert_to_tensor(start, name="start") + # stop must be convertible to the same dtype as start + stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype) + num_int = array_ops.convert_to_int_tensor(num, name="num") + num = cast(num_int, dtype=start.dtype) + + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(start), array_ops.shape(stop)) + start = array_ops.broadcast_to(start, broadcast_shape) + stop = array_ops.broadcast_to(stop, broadcast_shape) + + expanded_start = array_ops.expand_dims(start, axis=axis) + expanded_stop = array_ops.expand_dims(stop, axis=axis) + + shape = array_ops.shape(expanded_start) + ndims = array_ops.shape(shape)[0] + + axis = array_ops.where_v2(axis >= 0, axis, ndims + axis) + + # The purpose is to avoid having negative values when repeating. + num_fill = gen_math_ops.maximum(num_int - 2, 0) + # To avoid having negative values in the range or zero division + # the result is sliced in the end so a correct result is returned for + # num == 1, and num == 0. + n_steps = gen_math_ops.maximum(num_int - 1, 1) + delta = (expanded_stop - expanded_start) / cast(n_steps, + expanded_stop.dtype) + # Re-cast tensors as delta. + expanded_start = cast(expanded_start, delta.dtype) + expanded_stop = cast(expanded_stop, delta.dtype) + # If num < 0, we will throw exception in the range + # otherwise use the same div for delta + range_end = array_ops.where_v2(num_int >= 0, n_steps, -1) + # Even though range supports an output dtype, its limited + # (e.g. doesn't support half at the moment). + desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype) + mask = gen_math_ops.equal(axis, range(ndims)) + # desired_range_shape is [1. 1. 1. ... 1. num_fill 1. 1. ... 1.], where the + # index of num_fill is equal to axis. 
+ desired_range_shape = array_ops.where_v2(mask, num_fill, 1) + desired_range = array_ops.reshape(desired_range, desired_range_shape) + + res = expanded_start + delta * desired_range + + # Add the start and endpoints to the result, and slice out the desired + # portion. + all_tensors = (expanded_start, res, expanded_stop) + concatenated = array_ops.concat(all_tensors, axis=axis) + begin = array_ops.zeros_like(shape) + # Preserve shape information for final slice. + size = array_ops.concat( + (shape[0:axis], array_ops.reshape(num_int, [1]), shape[axis + 1 :]), + axis=0, + ) + return array_ops.slice(concatenated, begin, size) + + +linspace = linspace_nd + +arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment +arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment +tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max)) +tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min)) + + +def _set_doc(doc): + + def _decorator(func): + func.__doc__ = doc + return func + + return _decorator + + +# pylint: disable=redefined-builtin +@tf_export(v1=["math.argmax", "argmax"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, "Use the `axis` argument instead", + "dimension") +@_set_doc( + gen_math_ops.arg_max.__doc__.replace("dimensions", + "axes").replace("dimension", "axis")) +def argmax(input, + axis=None, + name=None, + dimension=None, + output_type=dtypes.int64): + axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension", + dimension) + return argmax_v2(input, axis, output_type, name) + + +@tf_export("math.argmax", "argmax", v1=[]) +@dispatch.add_dispatch_support +def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None): + """Returns the index with the largest value across axes of a tensor. + + In case of identity returns the smallest index. 
+ + For example: + + >>> A = tf.constant([2, 20, 30, 3, 6]) + >>> tf.math.argmax(A) # A[2] is maximum in tensor A + + >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8], + ... [14, 45, 23, 5, 27]]) + >>> tf.math.argmax(B, 0) + + >>> tf.math.argmax(B, 1) + + >>> C = tf.constant([0, 0, 0, 0]) + >>> tf.math.argmax(C) # Returns smallest index in case of ties + + + Args: + input: A `Tensor`. + axis: An integer, the axis to reduce across. Default to 0. + output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults + to `tf.int64`. + name: An optional name for the operation. + + Returns: + A `Tensor` of type `output_type`. + """ + if axis is None: + axis = 0 + return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type) + + +@tf_export(v1=["math.argmin", "argmin"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, "Use the `axis` argument instead", + "dimension") +@_set_doc( + gen_math_ops.arg_min.__doc__.replace("dimensions", + "axes").replace("dimension", "axis")) +def argmin(input, + axis=None, + name=None, + dimension=None, + output_type=dtypes.int64): + axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension", + dimension) + return argmin_v2(input, axis, output_type, name) + + +@tf_export("math.argmin", "argmin", v1=[]) +@dispatch.add_dispatch_support +def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None): + """Returns the index with the smallest value across axes of a tensor. + + Returns the smallest index in case of ties. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, + `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, + `uint64`. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + int32 or int64, must be in the range `-rank(input), rank(input))`. + Describes which axis of the input Tensor to reduce across. 
For vectors, + use axis = 0. + output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to + `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `output_type`. + + Usage: + ```python + import tensorflow as tf + a = [1, 10, 26.9, 2.8, 166.32, 62.3] + b = tf.math.argmin(input = a) + c = tf.keras.backend.eval(b) + # c = 0 + # here a[0] = 1 which is the smallest element of a across axis 0 + ``` + """ + if axis is None: + axis = 0 + return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type) + + +# pylint: enable=redefined-builtin + + +# pylint: disable=anomalous-backslash-in-string,protected-access +# pylint: disable=g-docstring-has-escape +@tf_export("math.abs", "abs") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def abs(x, name=None): # pylint: disable=redefined-builtin + r"""Computes the absolute value of a tensor. + + Given a tensor of integer or floating-point values, this operation returns a + tensor of the same type, where each element contains the absolute value of the + corresponding element in the input. + + Given a tensor `x` of complex numbers, this operation returns a tensor of type + `float32` or `float64` that is the absolute value of each element in `x`. For + a complex number \\(a + bj\\), its absolute value is computed as + \\(\sqrt{a^2 + b^2}\\). + + For example: + + >>> # real number + >>> x = tf.constant([-2.25, 3.25]) + >>> tf.abs(x) + + + >>> # complex number + >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]]) + >>> tf.abs(x) + + + Args: + x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`, + `int32`, `int64`, `complex64` or `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`, + with absolute values. Note, for `complex64` or `complex128` input, the + returned `Tensor` will be of type `float32` or `float64`, respectively. 
+ """ + with ops.name_scope(name, "Abs", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_complex: + return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) + return gen_math_ops._abs(x, name=name) + + +# pylint: enable=g-docstring-has-escape + + +# pylint: disable=redefined-builtin +def _bucketize(input, boundaries, name=None): + return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name) + + +# pylint: enable=redefined-builtin + + +class DivideDelegateWithName: + """Use Python2/Python3 division delegation to implement divide for tensors.""" + + def __init__(self, x, name): + """Construct DivideDelegateWithName. + + Args: + x: Tensor to use as left operand in operator overloads + name: The name that is preferred for the op created. + """ + self.x = x + self.name = name + + def __truediv__(self, y): + return _truediv_python3(self.x, y, self.name) + + def __floordiv__(self, y): + return floordiv(self.x, y, self.name) + + def __div__(self, y): + return _div_python2(self.x, y, self.name) + + +@tf_export("math.divide", "divide") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def divide(x, y, name=None): + """Computes Python style division of `x` by `y`. + + For example: + + >>> x = tf.constant([16, 12, 11]) + >>> y = tf.constant([4, 6, 2]) + >>> tf.divide(x,y) + + + Args: + x: A `Tensor` + y: A `Tensor` + name: A name for the operation (optional). + + Returns: + A `Tensor` with same shape as input + """ + + if name is not None: + # Cannot use tensors operator overload, because it has no way to track + # override names. Use a dummy class to track the runtime division behavior + return DivideDelegateWithName(x, name) / y + else: + # We do conversion here to make sure at least x is a tensor. 
+ if not tensor_util.is_tf_type(x): + dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None + x = ops.convert_to_tensor(x, dtype=dtype) + return x / y + + +@tf_export("math.multiply", "multiply") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def multiply(x, y, name=None): + """Returns an element-wise x * y. + + For example: + + >>> x = tf.constant(([1, 2, 3, 4])) + >>> tf.math.multiply(x, x) + + + Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also + pass in non-`Tensor` arguments: + + >>> tf.math.multiply(7,6) + + + If `x.shape` is not the same as `y.shape`, they will be broadcast to a + compatible shape. (More about broadcasting + [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).) + + For example: + + >>> x = tf.ones([1, 2]); + >>> y = tf.ones([2, 1]); + >>> x * y # Taking advantage of operator overriding + + + The reduction version of this elementwise operation is `tf.math.reduce_prod` + + Args: + x: A Tensor. Must be one of the following types: `bfloat16`, + `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, + `int16`, `int32`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + + A `Tensor`. Has the same type as `x`. + + Raises: + + * InvalidArgumentError: When `x` and `y` have incompatible shapes or types. 
+ """ + + return gen_math_ops.mul(x, y, name) + + +# TODO(aselle): put deprecation in after another round of global code changes +@deprecation.deprecated( + "2016-12-30", + "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`") +def _mul(x, y, name=None): + return gen_math_ops.mul(x, y, name) + + +_mul.__doc__ = ( + gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__)) + + +@tf_export("math.subtract", "subtract") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def subtract(x, y, name=None): + return gen_math_ops.sub(x, y, name) + + +subtract.__doc__ = gen_math_ops.sub.__doc__ + + +# TODO(aselle): put deprecation in after another round of global code changes +@deprecation.deprecated( + "2016-12-30", + "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`") +def _sub(x, y, name=None): + return gen_math_ops.sub(x, y, name) + + +_sub.__doc__ = ( + gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__)) + +negative = gen_math_ops.neg + + +# pylint: disable=g-docstring-has-escape +@deprecation.deprecated( + "2016-12-30", + "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`") +def _neg(x, name=None): + """Computes numerical negative value element-wise. + + I.e., \\(y = -x\\). + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + """ + return negative(x, name) + + +# pylint: enable=g-docstring-has-escape + + +@tf_export(v1=["math.scalar_mul", "scalar_mul"]) +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def scalar_mul(scalar, x, name=None): + """Multiplies a scalar times a `Tensor` or `IndexedSlices` object. 
+ + This is a special case of `tf.math.multiply`, where the first value must be a + `scalar`. Unlike the general form of `tf.math.multiply`, this is operation is + guaranteed to be efficient for `tf.IndexedSlices`. + + >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3]) + >>> with tf.GradientTape() as g: + ... g.watch(x) + ... y = tf.gather(x, [1, 2]) # IndexedSlices + ... z = tf.math.scalar_mul(10.0, y) + + Args: + scalar: A 0-D scalar `Tensor`. Must have known shape. + x: A `Tensor` or `IndexedSlices` to be scaled. + name: A name for the operation (optional). + + Returns: + `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`. + + Raises: + ValueError: if scalar is not a 0-D `scalar`. + """ + base_dtype = dtypes.as_dtype(x.dtype).base_dtype + scalar = ops.convert_to_tensor( + scalar, dtype=base_dtype, name="scalar") + shape = scalar.get_shape() + if shape.ndims == 0: + if isinstance(x, indexed_slices.IndexedSlices): + return indexed_slices.IndexedSlices( + gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape) + else: + return gen_math_ops.mul(scalar, x, name) + else: + raise ValueError( + f"The input scalar must be a 0-D value. Received shape {shape}.") + + +@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def softplus(features, name=None): + """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`. + + `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always + takes on positive values. + + + + Example: + + >>> import tensorflow as tf + >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy() + array([0.6931472, 1.3132616], dtype=float32) + + Args: + features: `Tensor` + name: Optional: name to associate with this operation. 
+ Returns: + `Tensor` + """ + return gen_nn_ops.softplus(features, name) + + +@tf_export("math.scalar_mul", "scalar_mul", v1=[]) +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +@_set_doc(scalar_mul.__doc__) +def scalar_mul_v2(scalar, x, name=None): + with ops.name_scope(name, "scalar_mul", [x]) as name: + return scalar_mul(scalar, x, name) + + +@tf_export("math.pow", "pow") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def pow(x, y, name=None): # pylint: disable=redefined-builtin + r"""Computes the power of one value to another. + + Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for + corresponding elements in `x` and `y`. For example: + + ```python + x = tf.constant([[2, 2], [3, 3]]) + y = tf.constant([[8, 16], [2, 3]]) + tf.pow(x, y) # [[256, 65536], [9, 27]] + ``` + + Args: + x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, + `complex64`, or `complex128`. + y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, + `complex64`, or `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. + """ + with ops.name_scope(name, "Pow", [x]) as name: + return gen_math_ops._pow(x, y, name=name) + + +# pylint: disable=redefined-builtin,redefined-outer-name +@tf_export("dtypes.complex", "complex") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def complex(real, imag, name=None): + r"""Converts two real numbers to a complex number. + + Given a tensor `real` representing the real part of a complex number, and a + tensor `imag` representing the imaginary part of a complex number, this + operation returns complex numbers elementwise of the form \\(a + bj\\), where + *a* represents the `real` part and *b* represents the `imag` part. + + The input tensors `real` and `imag` must have the same shape. 
+ + For example: + + ```python + real = tf.constant([2.25, 3.25]) + imag = tf.constant([4.75, 5.75]) + tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]] + ``` + + Args: + real: A `Tensor`. Must be one of the following types: `float32`, `float64`. + imag: A `Tensor`. Must have the same type as `real`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `complex64` or `complex128`. + + Raises: + TypeError: Real and imag must be correct types + """ + real = ops.convert_to_tensor(real, name="real") + imag = ops.convert_to_tensor(imag, name="imag") + with ops.name_scope(name, "Complex", [real, imag]) as name: + input_types = (real.dtype, imag.dtype) + if input_types == (dtypes.float64, dtypes.float64): + Tout = dtypes.complex128 + elif input_types == (dtypes.float32, dtypes.float32): + Tout = dtypes.complex64 + else: + raise TypeError( + f"The `real` and `imag` components have incorrect types: " + f"{real.dtype.name} {imag.dtype.name}. They must be consistent, and " + f"one of {[dtypes.float32, dtypes.float64]}") + return gen_math_ops._complex(real, imag, Tout=Tout, name=name) + + +@tf_export("math.sign", "sign") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def sign(x, name=None): + r"""Returns an element-wise indication of the sign of a number. + + `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`. + + For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`. + + Example usage: + + >>> # real number + >>> tf.math.sign([0., 2., -3.]) + + + >>> # complex number + >>> tf.math.sign([1 + 1j, 0 + 0j]) + + + Args: + x: A Tensor. Must be one of the following types: bfloat16, half, float32, + float64, int32, int64, complex64, complex128. + name: A name for the operation (optional). + + Returns: + A Tensor. Has the same type as x. + + If x is a SparseTensor, returns SparseTensor(x.indices, + tf.math.sign(x.values, ...), x.dense_shape). 
+ """ + x = ops.convert_to_tensor(x) + if x.dtype.is_complex: + return gen_math_ops.div_no_nan( + x, + cast( + gen_math_ops.complex_abs( + x, + Tout=dtypes.float32 + if x.dtype == dtypes.complex64 else dtypes.float64), + dtype=x.dtype), + name=name) + return gen_math_ops.sign(x, name=name) + + +@tf_export("math.real", v1=["math.real", "real"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("real") +def real(input, name=None): + r"""Returns the real part of a complex (or real) tensor. + + Given a tensor `input`, this operation returns a tensor of type `float` that + is the real part of each element in `input` considered as a complex number. + + For example: + + ```python + x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) + tf.math.real(x) # [-2.25, 3.25] + ``` + + If `input` is already real, it is returned unchanged. + + Args: + input: A `Tensor`. Must have numeric type. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32` or `float64`. + """ + with ops.name_scope(name, "Real", [input]) as name: + input = ops.convert_to_tensor(input, name="input") + if input.dtype.is_complex: + real_dtype = input.dtype.real_dtype + return gen_math_ops.real(input, Tout=real_dtype, name=name) + elif input.dtype.is_numeric: + return input + else: + raise TypeError( + "input must be a numeric tensor, but got tensor with dtype {}".format( + input.dtype + ) + ) + + +@tf_export("math.imag", v1=["math.imag", "imag"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("imag") +def imag(input, name=None): + r"""Returns the imaginary part of a complex (or real) tensor. + + Given a tensor `input`, this operation returns a tensor of type `float` that + is the imaginary part of each element in `input` considered as a complex + number. If `input` is real, a tensor of all zeros is returned. 
+ + For example: + + ```python + x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) + tf.math.imag(x) # [4.75, 5.75] + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `float`, `double`, + `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32` or `float64`. + """ + with ops.name_scope(name, "Imag", [input]) as name: + input = ops.convert_to_tensor(input, name="input") + if input.dtype.is_complex: + return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name) + else: + return array_ops.zeros_like(input) + + +@tf_export("math.angle", v1=["math.angle", "angle"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("angle") +def angle(input, name=None): + r"""Returns the element-wise argument of a complex (or real) tensor. + + Given a tensor `input`, this operation returns a tensor of type `float` that + is the argument of each element in `input` considered as a complex number. + + The elements in `input` are considered to be complex numbers of the form + \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. + If `input` is real then *b* is zero by definition. + + The argument returned by this function is of the form \\(atan2(b, a)\\). + If `input` is real, a tensor of all zeros is returned. + + For example: + + ``` + input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64) + tf.math.angle(input).numpy() + # ==> array([2.0131705, 1.056345 ], dtype=float32) + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `float`, `double`, + `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32` or `float64`. 
+ """ + with ops.name_scope(name, "Angle", [input]) as name: + input = ops.convert_to_tensor(input, name="input") + if input.dtype.is_complex: + return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name) + else: + return array_ops.where(input < 0, np.pi * array_ops.ones_like(input), + array_ops.zeros_like(input)) + + +# pylint: enable=redefined-outer-name,redefined-builtin + + +@tf_export("math.round", "round") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def round(x, name=None): # pylint: disable=redefined-builtin + """Rounds the values of a tensor to the nearest integer, element-wise. + + Rounds half to even. Also known as bankers rounding. If you want to round + according to the current system rounding mode use tf::cint. + For example: + + ```python + x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5]) + tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ] + ``` + + Args: + x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of same shape and type as `x`. + """ + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_integer: + return x + else: + return gen_math_ops.round(x, name=name) + + +# TODO(mdan): Include a full_type argument to replace dtype. +@tf_export("cast", "dtypes.cast") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def cast(x, dtype, name=None): + """Casts a tensor to a new type. + + The operation casts `x` (in case of `Tensor`) or `x.values` + (in case of `SparseTensor` or `IndexedSlices`) to `dtype`. 
+ + For example: + + >>> x = tf.constant([1.8, 2.2], dtype=tf.float32) + >>> tf.cast(x, tf.int32) + + + Notice `tf.cast` has an alias `tf.dtypes.cast`: + + >>> x = tf.constant([1.8, 2.2], dtype=tf.float32) + >>> tf.dtypes.cast(x, tf.int32) + + + The operation supports data types (for `x` and `dtype`) of + `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, + `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`. + In case of casting from complex types (`complex64`, `complex128`) to real + types, only the real part of `x` is returned. In case of casting from real + types to complex types (`complex64`, `complex128`), the imaginary part of the + returned value is set to `0`. The handling of complex types here matches the + behavior of numpy. + + Note casting nan and inf values to integral types has undefined behavior. + + Note this operation can lead to a loss of precision when converting native + Python `float` and `complex` variables to `tf.float64` or `tf.complex128` + tensors, since the input is first converted to the `float32` data type and + then widened. It is recommended to use `tf.convert_to_tensor` instead of + `tf.cast` for any non-tensor inputs. + + Args: + x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could + be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, + `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`, + `bfloat16`. + dtype: The destination type. The list of supported dtypes is the same as + `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and + same type as `dtype`. + + Raises: + TypeError: If `x` cannot be cast to the `dtype`. 
+ + """ + base_type = dtypes.as_dtype(dtype).base_dtype + if ( + isinstance(x, tensor_lib.Tensor) or _pywrap_utils.IsResourceVariable(x) + ) and base_type == x.dtype: + return x + with ops.name_scope(name, "Cast", [x]) as name: + if isinstance(x, sparse_tensor.SparseTensor): + values_cast = cast(x.values, base_type, name=name) + x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape) + elif isinstance(x, indexed_slices.IndexedSlices): + values_cast = cast(x.values, base_type, name=name) + x = indexed_slices.IndexedSlices(values_cast, x.indices, x.dense_shape) + else: + # TODO(josh11b): If x is not already a Tensor, we could return + # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that + # allows some conversions that cast() can't do, e.g. casting numbers to + # strings. + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_complex and base_type.is_floating: + logging.warn( + f"You are casting an input of type {x.dtype.name} to an " + f"incompatible dtype {base_type.name}. This will " + "discard the imaginary part and may not be what you " + "intended." + ) + if x.dtype != base_type: + x = gen_math_ops.cast(x, base_type, name=name) + return x + + +@tf_export("dtypes.saturate_cast", "saturate_cast") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def saturate_cast(value, dtype, name=None): + """Performs a safe saturating cast of `value` to `dtype`. + + This function casts the input to `dtype` without overflow. If + there is a danger that values would over or underflow in the cast, this op + applies the appropriate clamping before the cast. See `tf.cast` for more + details. + + Args: + value: A `Tensor`. + dtype: The desired output `DType`. + name: A name for the operation (optional). + + Returns: + `value` safely cast to `dtype`. + """ + # When casting to a type with smaller representable range, clamp. + # Note that this covers casting to unsigned types as well. 
+ with ops.name_scope(name, "saturate_cast", [value]) as name: + value = ops.convert_to_tensor(value, name="value") + dtype = dtypes.as_dtype(dtype).base_dtype + + in_dtype = value.dtype + if in_dtype.is_complex: + if dtype.is_complex: + # Clamp real and imag components separately, if required. + real_in_dtype = in_dtype.real_dtype + real_out_dtype = dtype.real_dtype + if ( + real_in_dtype.min < real_out_dtype.min + or real_in_dtype.max > real_out_dtype.max + ): + value = gen_math_ops._clip_by_value( + value, + ops.convert_to_tensor( + builtins.complex(real_out_dtype.min, real_out_dtype.min), + dtype=in_dtype), + ops.convert_to_tensor( + builtins.complex(real_out_dtype.max, real_out_dtype.max), + dtype=in_dtype), + name="clamp") + return cast(value, dtype, name=name) + else: + # Extract real component and fall through to clamp+cast. + value = real(value) + logging.warn("Casting complex to real discards imaginary part.") + in_dtype = in_dtype.real_dtype + + # in_dtype is real, but out_dtype could be complex. + out_real_dtype = dtype.real_dtype + + # TODO: b/288437118 - unconditionally apply `clip_by_value` to fix `inf` + # behavior. + if in_dtype.min < out_real_dtype.min or in_dtype.max > out_real_dtype.max: + # The output min/max may not actually be representable in the + # in_dtype (e.g. casting float32 to uint32). This can lead to undefined + # behavior when trying to cast a value outside the valid range of the + # target type. We work around this by nudging the min/max to fall within + # the valid output range. The catch is that we may actually saturate + # to a value less than the true saturation limit, but this is the best we + # can do in order to avoid UB without introducing a separate SaturateCast + # op. 
@tf_export(v1=["to_float"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.float32)`.
  return cast(x, dtypes.float32, name=name)


@tf_export(v1=["to_double"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.float64)`.
  return cast(x, dtypes.float64, name=name)


@tf_export(v1=["to_int32"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.int32)`.
  return cast(x, dtypes.int32, name=name)


@tf_export(v1=["to_int64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.int64)`.
  return cast(x, dtypes.int64, name=name)


@tf_export(v1=["to_bfloat16"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.bfloat16)`.
  return cast(x, dtypes.bfloat16, name=name)


@tf_export(v1=["to_complex64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.complex64)`.
  return cast(x, dtypes.complex64, name=name)


@tf_export(v1=["to_complex128"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)

  @end_compatibility

  """
  # Deprecated alias for `tf.cast(x, tf.complex128)`.
  return cast(x, dtypes.complex128, name=name)


# Conversion table for __truediv__.  None entries mean no conversion required.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.uint32: dtypes.float64,
    dtypes.int32: dtypes.float64,
    dtypes.uint64: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}


def _truediv_python3(x, y, name=None):
  """Divides `x / y` elementwise, following Python 3 `/` semantics.

  Integer inputs are first promoted to a floating dtype according to
  `_TRUEDIV_TABLE` (small ints -> float32, 32/64-bit ints -> float64);
  floating and complex inputs are divided directly.

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes, or a dtype that is not
      listed in `_TRUEDIV_TABLE`.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError as e:
      # Chain the KeyError so the original lookup failure is preserved,
      # matching the style used by `tf.math.divide_no_nan` below.
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}."
      ) from e
    if dtype is not None:
      # Promote integer inputs to the floating dtype mandated by the table.
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.  Floating/complex inputs use true division;
  integer inputs use flooring division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """

  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)


@tf_export("math.truediv", "truediv")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first.   This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`.  If you want integer division that rounds
  down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
@tf_export(v1=["div"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  @compatibility(TF2)
  This function is deprecated in TF2. Prefer using the Tensor division operator,
  `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
  semantics.
  @end_compatibility


  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
  and `y` are both integers then the result will be an integer. This is in
  contrast to Python 3, where division with `/` is always a float while division
  with `//` is always an integer.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)


@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if `y` (denominator) is zero.

  For example:

  >>> tf.constant(3.0) / 0.0

  >>> tf.math.divide_no_nan(3.0, 0.0)

  Note that 0 is returned if `y` is 0 even if `x` is nonfinite:

  >>> tf.math.divide_no_nan(np.nan, 0.0)

  Args:
    x: A `Tensor` of a floating or integer dtype.
    y: A `Tensor` with the same dtype as `x` and a compatible shape.
    name: A name for the operation (optional).

  Returns:
    The element-wise quotient as in `tf.math.divide(x, y)`,
    except that division by zero produces `0.0`, not `nan`.
  """

  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    if not tensor_util.is_tf_type(x) and tensor_util.is_tf_type(y):
      # Treat this case specially like divide() does above: let the tensor
      # operand determine the dtype when only `y` is already a tensor.
      y = ops.convert_to_tensor(y, name="y")
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
    else:
      x = ops.convert_to_tensor(x, name="x")
      y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      # Integer inputs are promoted to float (same table as true division).
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError as e:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in tf.math.divide_no_nan. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}."
      ) from e
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.div_no_nan(x, y, name=name)


@tf_export("math.multiply_no_nan")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
  """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.

  Note this is noncommutative: if y is NaN or infinite and x is 0, the result
  will be NaN.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x times y.
  """

  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}")
    return gen_math_ops.mul_no_nan(x, y, name=name)
def mod(x, y, name=None):
  r"""Returns element-wise remainder of division.

  This follows Python semantics in that the
  result here is consistent with a flooring divide. E.g.
  `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.

  *NOTE*: `math.floormod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`,
      `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `bfloat16`, `half`,
      `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "mod", [x, y]) as name:
    return gen_math_ops.floor_mod(x, y, name=name)


@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Mathematically, this is equivalent to floor(x / y). For example:
    floor(8.4 / 4.0) = floor(2.1) = 2.0
    floor(-8.4 / 4.0) = floor(-2.1) = -3.0
  This is equivalent to the '//' operator in Python 3.0 and above.

  Note: `x` and `y` must have the same type, and the result will have the same
  type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded toward -infinity.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops.floor_div(x, y, name=name)


# Module-level aliases to the generated division/modulo kernels.
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod


@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
  """The operation invoked by the `Tensor.__add__` operator.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__add__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    x: The left-hand side of the `+` operator.
    y: The right-hand side of the `+` operator.
    name: an optional name for the operation.

  Returns:
    The result of the elementwise `+` operation.
  """
  if ops.is_auto_dtype_conversion_enabled():
    # Auto dtype conversion handles promotion itself; defer to `add`.
    return add(x, y, name=name)
  if not isinstance(y, tensor_lib.Tensor) and not isinstance(
      y, sparse_tensor.SparseTensor):
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
  if x.dtype == dtypes.string:
    # String addition (concatenation) only exists on the legacy Add kernel.
    return gen_math_ops.add(x, y, name=name)
  else:
    return gen_math_ops.add_v2(x, y, name=name)


def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, sparse_tensor.SparseTensor):  # Case: Dense * Sparse.
    new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                     y.dense_shape, x, name)
    return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
  else:
    return multiply(x, y, name=name)
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  Requires that `x` and `y` have the same shape or have
  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  shapes. For example, `x` and `y` can be:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)

  Args:
    x: A `tf.Tensor` type bool.
    y: A `tf.Tensor` of type bool.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  # Composed as (x | y) & ~(x & y) from the existing logical kernels.
  return gen_math_ops.logical_and(
      gen_math_ops.logical_or(x, y),
      gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
      name=name)


def and_(x, y, name=None):
  # Operator helper: logical AND for bools, bitwise AND otherwise.
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_and(x, y, name)
  return gen_bitwise_ops.bitwise_and(x, y)


def or_(x, y, name=None):
  # Operator helper: logical OR for bools, bitwise OR otherwise.
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_or(x, y, name)
  return gen_bitwise_ops.bitwise_or(x, y)


def xor_(x, y, name=None):
  # Operator helper: logical XOR for bools, bitwise XOR otherwise.
  if x.dtype == dtypes.bool:
    return logical_xor(x, y, name)
  return gen_bitwise_ops.bitwise_xor(x, y)


def invert_(x, name=None):
  # Operator helper: logical NOT for bools, bitwise inversion otherwise.
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_not(x, name=name)
  return gen_bitwise_ops.invert(x, name=name)


@tf_export("math.equal", "equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise equality comparison, returning a Tensor of
  boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  return gen_math_ops.equal(x, y, name=name)


@tf_export("math.not_equal", "not_equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise inequality comparison, returning a Tensor
  of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  return gen_math_ops.not_equal(x, y, name=name)
+ + Performs a [broadcast]( + https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the + arguments and then an element-wise inequality comparison, returning a Tensor + of boolean values. + + For example: + + >>> x = tf.constant([2, 4]) + >>> y = tf.constant(2) + >>> tf.math.not_equal(x, y) + + + >>> x = tf.constant([2, 4]) + >>> y = tf.constant([2, 4]) + >>> tf.math.not_equal(x, y) + + + Args: + x: A `tf.Tensor`. + y: A `tf.Tensor`. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor` of type bool with the same size as that of x or y. + + Raises: + `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible + """ + return gen_math_ops.not_equal(x, y, name=name) + + +@tf_export("__operators__.eq", v1=[]) +@dispatch.add_dispatch_support +def tensor_equals(self, other): + """The operation invoked by the `Tensor.__eq__` operator. + + Compares two tensors element-wise for equality if they are + broadcast-compatible; or returns False if they are not broadcast-compatible. + (Note that this behavior differs from `tf.math.equal`, which raises an + exception if the two tensors are not broadcast-compatible.) + + Purpose in the API: + + This method is exposed in TensorFlow's API so that library developers + can register dispatching for `Tensor.__eq__` to allow it to handle + custom composite tensors & other custom objects. + + The API symbol is not intended to be called by users directly and does + appear in TensorFlow's generated documentation. + + Args: + self: The left-hand side of the `==` operator. + other: The right-hand side of the `==` operator. + + Returns: + The result of the elementwise `==` operation, or `False` if the arguments + are not broadcast-compatible. 
@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
  """The operation invoked by the `Tensor.__eq__` operator.

  Compares two tensors element-wise for equality if they are
  broadcast-compatible; or returns False if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__eq__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    not appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `==` operator.
    other: The right-hand side of the `==` operator.

  Returns:
    The result of the elementwise `==` operation, or `False` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  # Element-wise equality only applies under the `_USE_EQUALITY` behavior
  # switch, outside legacy graph construction (or inside tf.function graphs).
  elementwise = (
      tensor_lib.Tensor._USE_EQUALITY
      and ops.executing_eagerly_outside_functions()
      and (g is None or g.building_function)
  )
  if not elementwise:
    # In legacy graph mode, tensor equality is object equality.
    return self is other
  self, other = override_binary_operator.maybe_promote_tensors(self, other)
  return gen_math_ops.equal(self, other, incompatible_shape_error=False)


@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
  """The operation invoked by the `Tensor.__ne__` operator.

  Compares two tensors element-wise for inequality if they are
  broadcast-compatible; or returns True if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.not_equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__ne__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    not appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `!=` operator.
    other: The right-hand side of the `!=` operator.

  Returns:
    The result of the elementwise `!=` operation, or `True` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return True
  elementwise = (
      tensor_lib.Tensor._USE_EQUALITY
      and ops.executing_eagerly_outside_functions()
  )
  if not elementwise:
    # In legacy graph mode, tensor equality is object equality.
    return self is not other
  self, other = override_binary_operator.maybe_promote_tensors(self, other)
  return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
+ + @compatibility(numpy) + Equivalent to np.arange + @end_compatibility + """ + if limit is None: + start, limit = 0, start + + with ops.name_scope(name, "Range", [start, limit, delta]) as name: + if not isinstance(start, tensor_lib.Tensor): + start = ops.convert_to_tensor(start, dtype=dtype, name="start") + if not isinstance(limit, tensor_lib.Tensor): + limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit") + if not isinstance(delta, tensor_lib.Tensor): + delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta") + + # infer dtype if not explicitly provided + if dtype is None: + dtype_hierarchy = [ + dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64 + ] + assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta]) + inferred_dtype = max([arg.dtype for arg in [start, limit, delta]], + key=dtype_hierarchy.index) + else: + inferred_dtype = dtype + # Always try to perform a cast even when start/limit/delta are already + # tensors. This will resolve the case where start/limit/delta's original's + # dtype is different from provided dtype. + start = cast(start, inferred_dtype) + limit = cast(limit, inferred_dtype) + delta = cast(delta, inferred_dtype) + + return gen_math_ops._range(start, limit, delta, name=name) + + +def _range_tensor_conversion_function(value, dtype=None, name=None, + as_ref=False): + del as_ref + return range(value.start, value.stop, value.step, dtype=dtype, name=name) + + +tensor_conversion_registry.register_tensor_conversion_function( + builtins.range, _range_tensor_conversion_function) + + +# Reduction operations +def _ReductionDims(x, axis): # pylint: disable=invalid-name + """Returns range(0, rank(x)) if axis is None.""" + if axis is not None: + return axis + else: + try: + x_rank = x.shape.rank + except AttributeError: + x_rank = None + + # Fast path: avoid creating Rank and Range ops if ndims is known. 
# Reduction operations
def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None."""
  if axis is not None:
    return axis
  try:
    x_rank = x.shape.rank
  except AttributeError:
    x_rank = None
  # Fast path: avoid creating Rank and Range ops if ndims is known.
  if x_rank:
    return constant_op.constant(np.arange(x_rank, dtype=np.int32))
  # Otherwise, we rely on Range and Rank to do the right thing at run-time.
  return range(0, array_ops.rank(x))


def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  if isinstance(tensor, ops.EagerTensor):
    return True
  return tensor.shape.is_fully_defined()


def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  # A full reduction (axis=None) without keepdims always yields a scalar;
  # pin the static shape when it is not already fully known.
  if axis is None and not keepdims and not _has_fully_defined_shape(output):
    output.set_shape(())
  return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1. If `axis` is None, all dimensions
  are reduced, and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_sum(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1. If `axis` is None, all dimensions
  are reduced, and a tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
  >>> tf.reduce_sum(x).numpy()
  6
  >>> tf.reduce_sum(x, 0).numpy()
  array([2, 2, 2], dtype=int32)
  >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
  array([[3],
         [3]], dtype=int32)

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))


def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Sums `input_tensor` over the precomputed reduction indices `dims`."""
  keepdims = bool(keepdims) if keepdims is not None else False
  summed = gen_math_ops._sum(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, summed)
@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1. If `axis` is None, all dimensions
  are reduced, and a tensor with a single element is returned.

  For example:

  ```python
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  keepdims = bool(keepdims)
  dims = _ReductionDims(input_tensor, axis)
  norm = gen_math_ops.euclidean_norm(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, norm)


@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`. If `keepdims` is true, the reduced dimensions are retained with
  length 1. If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  # Resolve each deprecated alias before delegating to the v2 implementation.
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  input_tensor = deprecation.deprecated_argument_lookup("input", input,
                                                        "input_tensor",
                                                        input_tensor)
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
Must be in the range `[-rank(input), rank(input))`. + keepdims: If true, retains reduced dimensions with length 1. + dtype: The output dtype; defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + The reduced tensor (number of nonzero values). + """ + if keepdims is None: + keepdims = False + with ops.name_scope(name, "count_nonzero", [input]): + input = ops.convert_to_tensor(input, name="input") + # if the input is already of type bool, then there is no need + # to compare to zero. + if input.dtype == dtypes.bool: + predicate = input + else: + # A scalar of 'zero' is enough as `not_equal` will broadcast. + zero = array_ops.zeros([], dtype=input.dtype) + predicate = gen_math_ops.not_equal(input, zero) + return cast( + reduce_sum( + # int64 reduction happens on GPU + cast(predicate, dtypes.int64), + axis=axis, + keepdims=keepdims, + ), + dtype=dtype, + ) + + +@tf_export(v1=["math.reduce_mean", "reduce_mean"]) +@dispatch.add_dispatch_support +def reduce_mean_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes the mean of elements across dimensions of a tensor. + + Reduces `input_tensor` along the dimensions given in `axis` by computing the + mean of elements across the dimensions in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a tensor with a single + element is returned. + + For example: + + >>> x = tf.constant([[1., 1.], [2., 2.]]) + >>> tf.reduce_mean(x) + + >>> tf.reduce_mean(x, 0) + + >>> tf.reduce_mean(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. 
+ keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. + + @compatibility(numpy) + Equivalent to np.mean + + Please note that `np.mean` has a `dtype` parameter that could be used to + specify the output type. By default this is `dtype=float64`. On the other + hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, + for example: + + >>> x = tf.constant([1, 0, 1, 0]) + >>> tf.reduce_mean(x) + + >>> y = tf.constant([1., 0., 1., 0.]) + >>> tf.reduce_mean(y) + + + @end_compatibility + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_mean(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_mean", "reduce_mean", v1=[]) +@dispatch.add_dispatch_support +def reduce_mean(input_tensor, axis=None, keepdims=False, name=None): + """Computes the mean of elements across dimensions of a tensor. + + Reduces `input_tensor` along the dimensions given in `axis` by computing the + mean of elements across the dimensions in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a tensor with a single + element is returned. + + For example: + + >>> x = tf.constant([[1., 1.], [2., 2.]]) + >>> tf.reduce_mean(x) + + >>> tf.reduce_mean(x, 0) + + >>> tf.reduce_mean(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. 
Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. + + @compatibility(numpy) + Equivalent to np.mean + + Please note that `np.mean` has a `dtype` parameter that could be used to + specify the output type. By default this is `dtype=float64`. On the other + hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, + for example: + + >>> x = tf.constant([1, 0, 1, 0]) + >>> tf.reduce_mean(x) + + >>> y = tf.constant([1., 0., 1., 0.]) + >>> tf.reduce_mean(y) + + + @end_compatibility + """ + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops.mean( + input_tensor, _ReductionDims(input_tensor, axis), keepdims, + name=name)) + + +@tf_export("math.reduce_variance") +@dispatch.add_dispatch_support +def reduce_variance(input_tensor, axis=None, keepdims=False, name=None): + """Computes the variance of elements across dimensions of a tensor. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[1., 2.], [3., 4.]]) + >>> tf.math.reduce_variance(x) + + >>> tf.math.reduce_variance(x, 0) + + >>> tf.math.reduce_variance(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have real or complex type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name scope for the associated operations (optional). 
+ + Returns: + The reduced tensor, of the same dtype as the input_tensor. Note, for + `complex64` or `complex128` input, the returned `Tensor` will be of type + `float32` or `float64`, respectively. + + @compatibility(numpy) + Equivalent to np.var + + Please note `np.var` has a `dtype` parameter that could be used to specify the + output type. By default this is `dtype=float64`. On the other hand, + `tf.math.reduce_variance` has aggressive type inference from `input_tensor`. + @end_compatibility + """ + name = name if name else "reduce_variance" + with ops.name_scope(name): + input_tensor = ops.convert_to_tensor(input_tensor) + means = reduce_mean(input_tensor, axis=axis, keepdims=True) + if means.dtype.is_integer: + raise TypeError(f"Input must be either real or complex. " + f"Received integer type {means.dtype}.") + diff = input_tensor - means + if diff.dtype.is_complex: + # For complex values we need to take the absolute value before squaring. + # This is achieved by multiplying with the conjugate. + real_dtype = diff.dtype.real_dtype + squared_deviations = gen_math_ops.real( + gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype) + else: + squared_deviations = gen_math_ops.square(diff) + return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims) + + +@tf_export("math.reduce_std") +@dispatch.add_dispatch_support +def reduce_std(input_tensor, axis=None, keepdims=False, name=None): + """Computes the standard deviation of elements across dimensions of a tensor. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. 
+ + For example: + + >>> x = tf.constant([[1., 2.], [3., 4.]]) + >>> tf.math.reduce_std(x) + + >>> tf.math.reduce_std(x, 0) + + >>> tf.math.reduce_std(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have real or complex type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name scope for the associated operations (optional). + + Returns: + The reduced tensor, of the same dtype as the input_tensor. Note, for + `complex64` or `complex128` input, the returned `Tensor` will be of type + `float32` or `float64`, respectively. + + @compatibility(numpy) + Equivalent to np.std + + Please note `np.std` has a `dtype` parameter that could be used to specify the + output type. By default this is `dtype=float64`. On the other hand, + `tf.math.reduce_std` has aggressive type inference from `input_tensor`. + @end_compatibility + """ + name = name if name else "reduce_std" + with ops.name_scope(name): + input_tensor = ops.convert_to_tensor(input_tensor) + variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims) + return gen_math_ops.sqrt(variance) + + +@tf_export("math.reduce_prod", "reduce_prod", v1=[]) +@dispatch.add_dispatch_support +def reduce_prod(input_tensor, axis=None, keepdims=False, name=None): + """Computes `tf.math.multiply` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.multiply` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. 
+ + For example: + + >>> x = tf.constant([[1., 2.], [3., 4.]]) + >>> tf.math.reduce_prod(x) + + >>> tf.math.reduce_prod(x, 0) + + >>> tf.math.reduce_prod(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. + + @compatibility(numpy) + Equivalent to np.prod + @end_compatibility + """ + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops.prod( + input_tensor, _ReductionDims(input_tensor, axis), keepdims, + name=name)) + + +@tf_export(v1=["math.reduce_prod", "reduce_prod"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_prod_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes `tf.math.multiply` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.multiply` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[1., 2.], [3., 4.]]) + >>> tf.math.reduce_prod(x) + + >>> tf.math.reduce_prod(x, 0) + + >>> tf.math.reduce_prod(x, 1) + + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. 
Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. + + @compatibility(numpy) + Equivalent to np.prod + @end_compatibility + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_prod(input_tensor, axis, keepdims, name) + + +@tf_export(v1=["math.reduce_min", "reduce_min"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_min_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes the `tf.math.minimum` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.minimum` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + Usage example: + + >>> x = tf.constant([5, 1, 2, 4]) + >>> tf.reduce_min(x) + + >>> x = tf.constant([-5, -1, -2, -4]) + >>> tf.reduce_min(x) + + >>> x = tf.constant([4, float('nan')]) + >>> tf.reduce_min(x) + + >>> x = tf.constant([float('nan'), float('nan')]) + >>> tf.reduce_min(x) + + >>> x = tf.constant([float('-inf'), float('inf')]) + >>> tf.reduce_min(x) + + + See the numpy docs for `np.amin` and `np.nanmin` behavior. + + Args: + input_tensor: The tensor to reduce. Should have real numeric type. 
+ axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_min(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_min", "reduce_min", v1=[]) +@dispatch.add_dispatch_support +def reduce_min(input_tensor, axis=None, keepdims=False, name=None): + """Computes the `tf.math.minimum` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.minimum` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> a = tf.constant([ + ... [[1, 2], [3, 4]], + ... [[1, 2], [3, 4]] + ... ]) + >>> tf.reduce_min(a) + + + Choosing a specific axis returns minimum element in the given axis: + + >>> b = tf.constant([[1, 2, 3], [4, 5, 6]]) + >>> tf.reduce_min(b, axis=0) + + >>> tf.reduce_min(b, axis=1) + + + Setting `keepdims` to `True` retains the dimension of `input_tensor`: + + >>> tf.reduce_min(a, keepdims=True) + + >>> tf.math.reduce_min(a, axis=0, keepdims=True) + + + Args: + input_tensor: The tensor to reduce. Should have real numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. 
Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. + + @compatibility(numpy) + Equivalent to np.min + @end_compatibility + """ + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops._min( + input_tensor, _ReductionDims(input_tensor, axis), keepdims, + name=name)) + + +@tf_export(v1=["math.reduce_max", "reduce_max"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_max_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes `tf.math.maximum` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.maximum` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + Usage example: + + >>> x = tf.constant([5, 1, 2, 4]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([-5, -1, -2, -4]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([4, float('nan')]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([float('nan'), float('nan')]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([float('-inf'), float('inf')]) + >>> tf.reduce_max(x) + + + See the numpy docs for `np.amax` and `np.nanmax` behavior. + + Args: + input_tensor: The tensor to reduce. Should have real numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. 
+ keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_max(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_max", "reduce_max", v1=[]) +@dispatch.add_dispatch_support +def reduce_max(input_tensor, axis=None, keepdims=False, name=None): + """Computes `tf.math.maximum` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.maximum` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + Usage example: + + >>> x = tf.constant([5, 1, 2, 4]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([-5, -1, -2, -4]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([4, float('nan')]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([float('nan'), float('nan')]) + >>> tf.reduce_max(x) + + >>> x = tf.constant([float('-inf'), float('inf')]) + >>> tf.reduce_max(x) + + + See the numpy docs for `np.amax` and `np.nanmax` behavior. + + Args: + input_tensor: The tensor to reduce. Should have real numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. 
+ """ + return reduce_max_with_dims(input_tensor, axis, keepdims, name, + _ReductionDims(input_tensor, axis)) + + +def reduce_max_with_dims(input_tensor, + axis=None, + keepdims=False, + name=None, + dims=None): + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops._max(input_tensor, dims, keepdims, name=name)) + + +@tf_export(v1=["math.reduce_all", "reduce_all"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_all_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes `tf.math.logical_and` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.logical_and` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[True, True], [False, False]]) + >>> tf.math.reduce_all(x) + + >>> tf.math.reduce_all(x, 0) + + >>> tf.math.reduce_all(x, 1) + + + Args: + input_tensor: The boolean tensor to reduce. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. 
+ + @compatibility(numpy) + Equivalent to np.all + @end_compatibility + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_all(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_all", "reduce_all", v1=[]) +@dispatch.add_dispatch_support +def reduce_all(input_tensor, axis=None, keepdims=False, name=None): + """Computes `tf.math.logical_and` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.logical_and` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[True, True], [False, False]]) + >>> tf.math.reduce_all(x) + + >>> tf.math.reduce_all(x, 0) + + >>> tf.math.reduce_all(x, 1) + + + Args: + input_tensor: The boolean tensor to reduce. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. 
+ + @compatibility(numpy) + Equivalent to np.all + @end_compatibility + """ + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops._all( + input_tensor, _ReductionDims(input_tensor, axis), keepdims, + name=name)) + + +@tf_export(v1=["math.reduce_any", "reduce_any"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_any_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes `tf.math.logical_or` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.logical_or` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[True, True], [False, False]]) + >>> tf.reduce_any(x) + + >>> tf.reduce_any(x, 0) + + >>> tf.reduce_any(x, 1) + + + Args: + input_tensor: The boolean tensor to reduce. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. + keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. 
+ + @compatibility(numpy) + Equivalent to np.any + @end_compatibility + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_any(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_any", "reduce_any", v1=[]) +@dispatch.add_dispatch_support +def reduce_any(input_tensor, axis=None, keepdims=False, name=None): + """Computes `tf.math.logical_or` of elements across dimensions of a tensor. + + This is the reduction operation for the elementwise `tf.math.logical_or` op. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` is None, all dimensions are reduced, and a + tensor with a single element is returned. + + For example: + + >>> x = tf.constant([[True, True], [False, False]]) + >>> tf.reduce_any(x) + + >>> tf.reduce_any(x, 0) + + >>> tf.reduce_any(x, 1) + + + Args: + input_tensor: The boolean tensor to reduce. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. 
+ + @compatibility(numpy) + Equivalent to np.any + @end_compatibility + """ + keepdims = False if keepdims is None else bool(keepdims) + return _may_reduce_to_scalar( + keepdims, axis, + gen_math_ops._any( + input_tensor, _ReductionDims(input_tensor, axis), keepdims, + name=name)) + + +@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, + "keep_dims is deprecated, use keepdims instead", + "keep_dims") +def reduce_logsumexp_v1(input_tensor, + axis=None, + keepdims=None, + name=None, + reduction_indices=None, + keep_dims=None): + """Computes log(sum(exp(elements across dimensions of a tensor))). + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` has no entries, all dimensions are reduced, and a + tensor with a single element is returned. + + This function is more numerically stable than log(sum(exp(input))). It avoids + overflows caused by taking the exp of large inputs and underflows caused by + taking the log of small inputs. + + For example: + + ```python + x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) + tf.reduce_logsumexp(x) # log(6) + tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] + tf.reduce_logsumexp(x, 1) # [log(3), log(3)] + tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] + tf.reduce_logsumexp(x, [0, 1]) # log(6) + ``` + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + reduction_indices: The old (deprecated) name for axis. 
+ keep_dims: Deprecated alias for `keepdims`. + + Returns: + The reduced tensor. + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, + "reduction_indices", + reduction_indices) + keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, + "keep_dims", keep_dims) + return reduce_logsumexp(input_tensor, axis, keepdims, name) + + +@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[]) +@dispatch.add_dispatch_support +def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None): + """Computes log(sum(exp(elements across dimensions of a tensor))). + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + of the entries in `axis`, which must be unique. If `keepdims` is true, the + reduced dimensions are retained with length 1. + + If `axis` has no entries, all dimensions are reduced, and a + tensor with a single element is returned. + + This function is more numerically stable than log(sum(exp(input))). It avoids + overflows caused by taking the exp of large inputs and underflows caused by + taking the log of small inputs. + + For example: + + ```python + x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) + tf.reduce_logsumexp(x) # log(6) + tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] + tf.reduce_logsumexp(x, 1) # [log(3), log(3)] + tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] + tf.reduce_logsumexp(x, [0, 1]) # log(6) + ``` + + Args: + input_tensor: The tensor to reduce. Should have numeric type. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keepdims: If true, retains reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + The reduced tensor. 
+ """ + with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name: + raw_max = reduce_max(input_tensor, axis=axis, keepdims=True) + my_max = array_ops.stop_gradient( + gen_math_ops.select( + gen_math_ops.is_finite(raw_max), raw_max, + gen_array_ops.zeros_like(raw_max))) + result = gen_math_ops.log( + reduce_sum( + exp(subtract(input_tensor, my_max)), + axis=axis, + keepdims=keepdims)) + if not keepdims: + my_max = array_ops.reshape(my_max, gen_array_ops.shape(result)) + result = add(result, my_max, name=name) + return _may_reduce_to_scalar(keepdims, axis, result) + + +@tf_export("linalg.trace", v1=["linalg.trace", "trace"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("trace") +def trace(x, name=None): + """Compute the trace of a tensor `x`. + + `trace(x)` returns the sum along the main diagonal of each inner-most matrix + in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output + is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where + + `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])` + + For example: + + ```python + x = tf.constant([[1, 2], [3, 4]]) + tf.linalg.trace(x) # 5 + + x = tf.constant([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + tf.linalg.trace(x) # 15 + + x = tf.constant([[[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + [[-1, -2, -3], + [-4, -5, -6], + [-7, -8, -9]]]) + tf.linalg.trace(x) # [15, -15] + ``` + + Args: + x: tensor. + name: A name for the operation (optional). + + Returns: + The trace of input tensor. 
+ """ + with ops.name_scope(name, "Trace", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name) + + +@tf_export("linalg.matmul", "matmul") +@dispatch.add_dispatch_support +def matmul( + a, + b, + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + a_is_sparse=False, + b_is_sparse=False, + output_type=None, + grad_a=False, + grad_b=False, + name=None, +): + """Multiplies matrix `a` by matrix `b`, producing `a` * `b`. + + The inputs must, following any transpositions, be tensors of rank >= 2 + where the inner 2 dimensions specify valid matrix multiplication dimensions, + and any further outer dimensions specify matching batch size. + + Both matrices must be of the same type. The supported types are: + `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, + `complex64`, `complex128`. + + Either matrix can be transposed or adjointed (conjugated and transposed) on + the fly by setting one of the corresponding flag to `True`. These are `False` + by default. + + If one or both of the matrices contain a lot of zeros, a more efficient + multiplication algorithm can be used by setting the corresponding + `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. + This optimization is only available for plain matrices (rank-2 tensors) with + datatypes `bfloat16` or `float32`. 
+ + A simple 2-D tensor matrix multiplication: + + >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) + >>> a # 2-D tensor + + >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) + >>> b # 2-D tensor + + >>> c = tf.matmul(a, b) + >>> c # `a` * `b` + + + A batch matrix multiplication with batch shape [2]: + + >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) + >>> a # 3-D tensor + + >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2]) + >>> b # 3-D tensor + + >>> c = tf.matmul(a, b) + >>> c # `a` * `b` + + + Since python >= 3.5 the @ operator is supported + (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow, + it simply calls the `tf.matmul()` function, so the following lines are + equivalent: + + >>> d = a @ b @ [[10], [11]] + >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]]) + + Args: + a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`, + `complex64`, `complex128` and rank > 1. + b: `tf.Tensor` with same type and rank as `a`. + transpose_a: If `True`, `a` is transposed before multiplication. + transpose_b: If `True`, `b` is transposed before multiplication. + adjoint_a: If `True`, `a` is conjugated and transposed before + multiplication. + adjoint_b: If `True`, `b` is conjugated and transposed before + multiplication. + a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this + **does not support `tf.sparse.SparseTensor`**, it just makes optimizations + that assume most values in `a` are zero. See + `tf.sparse.sparse_dense_matmul` for some support for + `tf.sparse.SparseTensor` multiplication. + b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this + **does not support `tf.sparse.SparseTensor`**, it just makes optimizations + that assume most values in `b` are zero. See + `tf.sparse.sparse_dense_matmul` for some support for + `tf.sparse.SparseTensor` multiplication. + output_type: The output datatype if needed. 
Defaults to None in which case + the output_type is the same as input type. Currently only works when input + tensors are type (u)int8 and output_type can be int32. + grad_a: Set it to `True` to hint that Tensor `a` is for the backward pass. + grad_b: Set it to `True` to hint that Tensor `b` is for the backward pass. + name: Name for the operation (optional). + + Returns: + A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix + is the product of the corresponding matrices in `a` and `b`, e.g. if all + transpose or adjoint attributes are `False`: + + `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`, + for all indices `i`, `j`. + + Note: This is matrix product, not element-wise product. + + + Raises: + ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and + `adjoint_b` are both set to `True`. + TypeError: If output_type is specified but the types of `a`, `b` and + `output_type` is not (u)int8, (u)int8 and int32. + """ + + with ops.name_scope(name, "MatMul", [a, b]) as name: + if transpose_a and adjoint_a: + raise ValueError( + f"Only one of `transpose_a` and `adjoint_a` can be True. " + f"Received `transpose_a`={transpose_a}, " + f"`adjoint_a`={adjoint_a}.") + if transpose_b and adjoint_b: + raise ValueError( + f"Only one of `transpose_b` and `adjoint_b` can be True. " + f"Received `transpose_b`={transpose_b}, " + f"`adjoint_b`={adjoint_b}.") + + if context.executing_eagerly(): + if not ( + isinstance(a, ops.EagerTensor) or _pywrap_utils.IsResourceVariable(a) + ): + a = ops.convert_to_tensor(a, name="a") + if not isinstance(b, ops.EagerTensor) or _pywrap_utils.IsResourceVariable( + b): + b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b") + else: + a = ops.convert_to_tensor(a, name="a") + b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b") + + # TODO(apassos) remove _shape_tuple here when it is not needed. 
+ a_shape = a._shape_tuple() # pylint: disable=protected-access + b_shape = b._shape_tuple() # pylint: disable=protected-access + + output_may_have_non_empty_batch_shape = ( + (a_shape is None or len(a_shape) > 2) or + (b_shape is None or len(b_shape) > 2)) + + # TODO(b/178749687): remove this boolean and all related branches once the + # bridges are ready. + # batch_matmul_v3 is for when input type is different from output type. + use_batch_matmul_v3 = False + if output_type and (output_type != a.dtype or output_type != b.dtype): + use_batch_matmul_v3 = True + + if (not a_is_sparse and + not b_is_sparse) and output_may_have_non_empty_batch_shape: + # BatchMatmul does not support transpose, so we conjugate the matrix and + # use adjoint instead. Conj() is a noop for real matrices. + if transpose_a: + a = conj(a) + adjoint_a = True + if transpose_b: + b = conj(b) + adjoint_b = True + if use_batch_matmul_v3: + return gen_math_ops.batch_mat_mul_v3( + a, + b, + adj_x=adjoint_a, + adj_y=adjoint_b, + Tout=output_type, + grad_x=grad_a, + grad_y=grad_b, + name=name, + ) + else: + return gen_math_ops.batch_mat_mul_v2( + a, + b, + adj_x=adjoint_a, + adj_y=adjoint_b, + grad_x=grad_a, + grad_y=grad_b, + name=name, + ) + + # Neither matmul nor sparse_matmul support adjoint, so we conjugate + # the matrix and use transpose instead. Conj() is a noop for real + # matrices. 
+ if adjoint_a: + a = conj(a) + transpose_a = True + if adjoint_b: + b = conj(b) + transpose_b = True + + use_sparse_matmul = False + if a_is_sparse or b_is_sparse: + sparse_matmul_types = [dtypes.bfloat16, dtypes.float32] + use_sparse_matmul = ( + a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types) + if (((a.dtype == dtypes.bfloat16 and + b.dtype not in (dtypes.int8, dtypes.uint8)) or + (b.dtype == dtypes.bfloat16 and + a.dtype not in (dtypes.int8, dtypes.uint8))) and a.dtype != b.dtype): + # matmul currently doesn't handle mixed-precision inputs other than + # fp16 * int8 which is supported in BatchMatMulV3. + use_sparse_matmul = True + if use_sparse_matmul: + ret = sparse_matmul( + a, + b, + transpose_a=transpose_a, + transpose_b=transpose_b, + a_is_sparse=a_is_sparse, + b_is_sparse=b_is_sparse, + name=name) + # sparse_matmul always returns float32, even with + # bfloat16 inputs. This prevents us from configuring bfloat16 training. + # casting to bfloat16 also matches non-sparse matmul behavior better. + if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16: + ret = cast(ret, dtypes.bfloat16) + return ret + else: + if use_batch_matmul_v3: + adjoint_a = adjoint_a or transpose_a + adjoint_b = adjoint_b or transpose_b + return gen_math_ops.batch_mat_mul_v3( + a, + b, + adj_x=adjoint_a, + adj_y=adjoint_b, + Tout=output_type, + grad_x=grad_a, + grad_y=grad_b, + name=name, + ) + else: + return gen_math_ops.mat_mul( + a, + b, + transpose_a=transpose_a, + transpose_b=transpose_b, + grad_a=grad_a, + grad_b=grad_b, + name=name, + ) + + +@tf_export("linalg.matvec") +@dispatch.add_dispatch_support +def matvec(a, + b, + transpose_a=False, + adjoint_a=False, + a_is_sparse=False, + b_is_sparse=False, + name=None): + """Multiplies matrix `a` by vector `b`, producing `a` * `b`. + + The matrix `a` must, following any transpositions, be a tensor of rank >= 2, + with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast + with `shape(b)[:-1]`. 
+ + Both `a` and `b` must be of the same type. The supported types are: + `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`. + + Matrix `a` can be transposed or adjointed (conjugated and transposed) on + the fly by setting one of the corresponding flag to `True`. These are `False` + by default. + + If one or both of the inputs contain a lot of zeros, a more efficient + multiplication algorithm can be used by setting the corresponding + `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. + This optimization is only available for plain matrices/vectors (rank-2/1 + tensors) with datatypes `bfloat16` or `float32`. + + For example: + + ```python + # 2-D tensor `a` + # [[1, 2, 3], + # [4, 5, 6]] + a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) + + # 1-D tensor `b` + # [7, 9, 11] + b = tf.constant([7, 9, 11], shape=[3]) + + # `a` * `b` + # [ 58, 139] + c = tf.linalg.matvec(a, b) + + + # 3-D tensor `a` + # [[[ 1, 2, 3], + # [ 4, 5, 6]], + # [[ 7, 8, 9], + # [10, 11, 12]]] + a = tf.constant(np.arange(1, 13, dtype=np.int32), + shape=[2, 2, 3]) + + # 2-D tensor `b` + # [[13, 14, 15], + # [16, 17, 18]] + b = tf.constant(np.arange(13, 19, dtype=np.int32), + shape=[2, 3]) + + # `a` * `b` + # [[ 86, 212], + # [410, 563]] + c = tf.linalg.matvec(a, b) + ``` + + Args: + a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`, + `complex128` and rank > 1. + b: `Tensor` with same type as `a` and compatible dimensions. + transpose_a: If `True`, `a` is transposed before multiplication. + adjoint_a: If `True`, `a` is conjugated and transposed before + multiplication. + a_is_sparse: If `True`, `a` is treated as a sparse matrix. + b_is_sparse: If `True`, `b` is treated as a sparse matrix. + name: Name for the operation (optional). + + Returns: + A `Tensor` of the same type as `a` and `b` where each inner-most vector is + the product of the corresponding matrices in `a` and vectors in `b`, e.g. 
if + all transpose or adjoint attributes are `False`: + + `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i. + + Note: This is matrix-vector product, not element-wise product. + + + Raises: + ValueError: If transpose_a and adjoint_a are both set to True. + """ + with ops.name_scope(name, "MatVec", [a, b]) as name: + output = matmul( + a, + array_ops.expand_dims(b, axis=-1), + transpose_a=transpose_a, + adjoint_a=adjoint_a, + a_is_sparse=a_is_sparse, + b_is_sparse=b_is_sparse) + return array_ops.squeeze(output, axis=-1) + + +# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF +# functions (e.g. tf.add). +def matmul_wrapper(a, b, name=None): # pylint: disable=missing-function-docstring + if ops.is_numpy_style_type_promotion(): + return a._matmul(b) + return matmul(a, b, name=name) +matmul_wrapper.__doc__ = matmul.__doc__ + +sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")( + gen_math_ops.sparse_mat_mul) +tf_export(v1=["sparse_matmul"])(sparse_matmul) +@dispatch.add_dispatch_support + + +def _as_indexed_slices(x, optimize=True): + """Convert 'x' to IndexedSlices. + + Convert a dense Tensor to a block-sparse IndexedSlices. + + Args: + x: Either a Tensor object, or an IndexedSlices object. + optimize: if true, attempt to optimize the conversion of 'x'. + + Returns: + An IndexedSlices object. + + Raises: + TypeError: If 'x' is not a Tensor or an IndexedSlices object. + """ + # TODO(touts): op_scope + if not isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)): + raise TypeError(f"Not a Tensor or IndexedSlices: {type(x)}.") + if isinstance(x, indexed_slices.IndexedSlices): + return x + x_shape = array_ops.shape_internal(x, optimize=optimize) + return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape) + + +def _as_indexed_slices_list(inputs, optimize=True): + """Convert all elements of 'inputs' to IndexedSlices. 
+ + Additionally, homogenize the types of all the indices to + either int32 or int64. + + Args: + inputs: List containing either Tensor or IndexedSlices objects. + optimize: if true, attempt to optimize the conversion of each input. + + Returns: + A list of IndexedSlices objects. + + Raises: + TypeError: If 'inputs' is not a list or a tuple. + """ + if not isinstance(inputs, (list, tuple)): + raise TypeError(f"Expected a list or tuple, not {type(inputs)}.") + outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs] + with_int32_index = [ + o.indices for o in outputs if o.indices.dtype == dtypes.int32 + ] + if not with_int32_index or len(with_int32_index) == len(outputs): + return outputs + casted_outputs = [] + for o in outputs: + if o.indices.dtype == dtypes.int32: + casted_outputs.append( + indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64), + o.dense_shape)) + else: + casted_outputs.append(o) + return casted_outputs + + +@tf_export("math.add", "add") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def add(x, y, name=None): + """Returns x + y element-wise. + + Example usages below. + + Add a scalar and a list: + + >>> x = [1, 2, 3, 4, 5] + >>> y = 1 + >>> tf.add(x, y) + + + Note that binary `+` operator can be used instead: + + >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5]) + >>> y = tf.convert_to_tensor(1) + >>> x + y + + + Add a tensor and a list of same shape: + + >>> x = [1, 2, 3, 4, 5] + >>> y = tf.constant([1, 2, 3, 4, 5]) + >>> tf.add(x, y) + + + **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a + non-tensor, the non-tensor input will adopt (or get casted to) the data type + of the tensor input. This can potentially cause unwanted overflow or underflow + conversion. 
+ + For example, + + >>> x = tf.constant([1, 2], dtype=tf.int8) + >>> y = [2**7 + 1, 2**7 + 2] + >>> tf.add(x, y) + + + When adding two input values of different shapes, `Add` follows NumPy + broadcasting rules. The two input array shapes are compared element-wise. + Starting with the trailing dimensions, the two dimensions either have to be + equal or one of them needs to be `1`. + + For example, + + >>> x = np.ones(6).reshape(1, 2, 1, 3) + >>> y = np.ones(6).reshape(2, 1, 3, 1) + >>> tf.add(x, y).shape.as_list() + [2, 2, 3, 3] + + Another example with two arrays of different dimension. + + >>> x = np.ones([1, 2, 1, 4]) + >>> y = np.ones([3, 4]) + >>> tf.add(x, y).shape.as_list() + [1, 2, 3, 4] + + The reduction version of this elementwise operation is `tf.math.reduce_sum` + + Args: + x: A `tf.Tensor`. Must be one of the following types: bfloat16, half, + float16, float32, float64, uint8, uint16, uint32, uint64, int8, int16, + int32, int64, complex64, complex128, string. + y: A `tf.Tensor`. Must have the same type as x. + name: A name for the operation (optional) + """ + with ops.name_scope(name, "Add", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y") + if x.dtype == dtypes.string: + return gen_math_ops.add(x, y, name=name) + else: + return gen_math_ops.add_v2(x, y, name=name) + + +@tf_export("math.add_n", "add_n") +@dispatch.add_dispatch_support(iterable_parameters=["inputs"]) +def add_n(inputs, name=None): + """Returns the element-wise sum of a list of tensors. + + All inputs in the list must have the same shape. This op does not + [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html) + its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator) + instead. 
+ + For example: + + >>> a = tf.constant([[3, 5], [4, 8]]) + >>> b = tf.constant([[1, 6], [2, 9]]) + >>> tf.math.add_n([a, b, a]).numpy() + array([[ 7, 16], + [10, 25]], dtype=int32) + + See Also: + + * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical + operation, but `tf.add_n` may be more efficient because it sums the + tensors directly. `reduce_sum` on the other hand calls + `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them + into a single tensor before summing. + + Args: + inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the + same shape and type. `tf.IndexedSlices` objects will be converted into + dense tensors prior to adding. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor` of the same shape and type as the elements of `inputs`. + + Raises: + ValueError: If `inputs` don't all have same shape and dtype or the shape + cannot be inferred. + """ + if not inputs or not isinstance(inputs, collections_abc.Iterable): + raise ValueError("Inputs must be an iterable of at least one " + "Tensor/IndexedSlices with the same dtype and shape.") + inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs) + if not all( + isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) + for x in inputs): + raise ValueError("Inputs must be an iterable of at least one " + "Tensor/IndexedSlices with the same dtype and shape.") + + if len(inputs) == 1: + if isinstance(inputs[0], indexed_slices.IndexedSlices): + values = ops.convert_to_tensor(inputs[0]) + else: + values = inputs[0] + if name: + return array_ops.identity(values, name=name) + return values + return gen_math_ops.add_n(inputs, name=name) + + +@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"]) +@dispatch.add_dispatch_support +@deprecation.deprecated(None, "Use `tf.math.add_n` Instead") +def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None): + """Returns the element-wise sum of a 
list of tensors. + + Optionally, pass `shape` and `tensor_dtype` for shape and type checking, + otherwise, these are inferred. + + For example: + + >>> a = tf.constant([[1, 2], [3, 4]]) + >>> b = tf.constant([[5, 0], [0, 6]]) + >>> tf.math.accumulate_n([a, b, a]).numpy() + array([[ 7, 4], + [ 6, 14]], dtype=int32) + + >>> # Explicitly pass shape and type + >>> tf.math.accumulate_n( + ... [a, b, a], shape=[2, 2], tensor_dtype=tf.int32).numpy() + array([[ 7, 4], + [ 6, 14]], dtype=int32) + + Note: The input must be a list or tuple. This function does not handle + `IndexedSlices` + + See Also: + + * `tf.reduce_sum(inputs, axis=0)` - This performe the same mathematical + operation, but `tf.add_n` may be more efficient because it sums the + tensors directly. `reduce_sum` on the other hand calls + `tf.convert_to_tensor` on the list of tensors, unncessairly stacking them + into a single tensor before summing. + * `tf.add_n` - This is another python wrapper for the same Op. It has + nearly identical functionality. + + Args: + inputs: A list of `Tensor` objects, each with same shape and type. + shape: Expected shape of elements of `inputs` (optional). Also controls the + output shape of this op, which may affect type inference in other ops. A + value of `None` means "infer the input shape from the shapes in `inputs`". + tensor_dtype: Expected data type of `inputs` (optional). A value of `None` + means "infer the input dtype from `inputs[0]`". + name: A name for the operation (optional). + + Returns: + A `Tensor` of same shape and type as the elements of `inputs`. + + Raises: + ValueError: If `inputs` don't all have same shape and dtype or the shape + cannot be inferred. 
+ """ + + def _input_error(): + return ValueError("inputs must be a list of at least one Tensor with the " + "same dtype and shape") + + if not inputs or not isinstance(inputs, (list, tuple)): + raise _input_error() + inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs) + if not all(isinstance(x, tensor_lib.Tensor) for x in inputs): + raise _input_error() + if not all(x.dtype == inputs[0].dtype for x in inputs): + raise _input_error() + if shape is not None: + shape = tensor_shape.as_shape(shape) + else: + shape = tensor_shape.unknown_shape() + for input_tensor in inputs: + if isinstance(input_tensor, tensor_lib.Tensor): + shape = shape.merge_with(input_tensor.get_shape()) + + # tensor_dtype is for safety only; operator's output type computed in C++ + if tensor_dtype is not None and tensor_dtype != inputs[0].dtype: + raise TypeError( + f"The `tensor_dtype` argument is {tensor_dtype}, but `input` is of " + f"type {inputs[0].dtype}. These must be equal. Try casting the input " + f"to the desired type.") + + if len(inputs) == 1 and name is None: + return inputs[0] + elif len(inputs) == 1 and name is not None: + return array_ops.identity(inputs[0], name=name) + return add_n(inputs, name=name) + + +@ops.RegisterGradient("AccumulateNV2") +def _accumulate_n_grad(op, grad): + """Same as gradient for AddN. Copies the gradient to all inputs.""" + # Not broadcasting. + return [grad] * len(op.inputs) + + +@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def sigmoid(x, name=None): + r"""Computes sigmoid of `x` element-wise. + + Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$. + + For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$. 
+ + Example Usage: + + If a positive number is large, then its sigmoid will approach to 1 since the + formula will be `y = / (1 + )` + + >>> x = tf.constant([0.0, 1.0, 50.0, 100.0]) + >>> tf.math.sigmoid(x) + + + If a negative number is large, its sigmoid will approach to 0 since the + formula will be `y = 1 / (1 + )` + + >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0]) + >>> tf.math.sigmoid(x) + + + Args: + x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or + `complex128`. + name: A name for the operation (optional). + + Returns: + A Tensor with the same type as `x`. + + Usage Example: + + >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32) + >>> tf.sigmoid(x) + + + @compatibility(scipy) + Equivalent to scipy.special.expit + @end_compatibility + """ + with ops.name_scope(name, "Sigmoid", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.sigmoid(x, name=name) + + +@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("log_sigmoid") +def log_sigmoid(x, name=None): + """Computes log sigmoid of `x` element-wise. + + Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability, + we use `y = -tf.nn.softplus(-x)`. + + Args: + x: A Tensor with type `float32` or `float64`. + name: A name for the operation (optional). + + Returns: + A Tensor with the same type as `x`. + + Usage Example: + + If a positive number is large, then its log_sigmoid will approach to 0 since + the formula will be `y = log( / (1 + ) )` which + approximates to `log (1)` which is 0. + + >>> x = tf.constant([0.0, 1.0, 50.0, 100.0]) + >>> tf.math.log_sigmoid(x) + + + If a negative number is large, its log_sigmoid will approach to the number + itself since the formula will be `y = log( 1 / (1 + ) )` which is + `log (1) - log ( (1 + ) )` which approximates to `- ` + that is the number itself. 
+ + >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0]) + >>> tf.math.log_sigmoid(x) + + """ + with ops.name_scope(name, "LogSigmoid", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name) # pylint: disable=invalid-unary-operand-type + + +@tf_export("math.cumsum", "cumsum") +@dispatch.add_dispatch_support +def cumsum(x, axis=0, exclusive=False, reverse=False, name=None): + """Compute the cumulative sum of the tensor `x` along `axis`. + + By default, this op performs an inclusive cumsum, which means that the first + element of the input is identical to the first element of the output: + For example: + + >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c] + >>> x = tf.constant([2, 4, 6, 8]) + >>> tf.cumsum(x) + + + >>> # using varying `axis` values + >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]]) + >>> tf.cumsum(y, axis=0) + + >>> tf.cumsum(y, axis=1) + + + By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed + instead: + + >>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b] + >>> x = tf.constant([2, 4, 6, 8]) + >>> tf.cumsum(x, exclusive=True) + + + By setting the `reverse` kwarg to `True`, the cumsum is performed in the + opposite direction: + + >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c] + >>> x = tf.constant([2, 4, 6, 8]) + >>> tf.cumsum(x, reverse=True) + + + This is more efficient than using separate `tf.reverse` ops. + The `reverse` and `exclusive` kwargs can also be combined: + + >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0] + >>> x = tf.constant([2, 4, 6, 8]) + >>> tf.cumsum(x, exclusive=True, reverse=True) + + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + `complex128`, `qint8`, `quint8`, `qint32`, `half`. + axis: A `Tensor` of type `int32` (default: 0). Must be in the range + `[-rank(x), rank(x))`. 
+ exclusive: If `True`, perform exclusive cumsum. + reverse: A `bool` (default: False). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + with ops.name_scope(name, "Cumsum", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.cumsum( + x, axis, exclusive=exclusive, reverse=reverse, name=name) + + +@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("cumprod") +def cumprod(x, axis=0, exclusive=False, reverse=False, name=None): + """Compute the cumulative product of the tensor `x` along `axis`. + + By default, this op performs an inclusive cumprod, which means that the + first element of the input is identical to the first element of the output: + + ```python + tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c] + ``` + + By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + performed + instead: + + ```python + tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b] + ``` + + By setting the `reverse` kwarg to `True`, the cumprod is performed in the + opposite direction: + + ```python + tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c] + ``` + + This is more efficient than using separate `tf.reverse` ops. + The `reverse` and `exclusive` kwargs can also be combined: + + ```python + tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + `complex128`, `qint8`, `quint8`, `qint32`, `half`. + axis: A `Tensor` of type `int32` (default: 0). Must be in the range + `[-rank(x), rank(x))`. + exclusive: If `True`, perform exclusive cumprod. + reverse: A `bool` (default: False). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + with ops.name_scope(name, "Cumprod", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.cumprod( + x, axis, exclusive=exclusive, reverse=reverse, name=name) + + +@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"]) +@dispatch.add_dispatch_support +def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None): + """Compute the cumulative log-sum-exp of the tensor `x` along `axis`. + + By default, this op performs an inclusive cumulative log-sum-exp, which means + that the first element of the input is identical to the first element of + the output. + + This operation is significantly more numerically stable than the equivalent + tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although + computes the same result given infinite numerical precision. However, note + that in some cases, it may be less stable than `tf.math.reduce_logsumexp` + for a given element, as it applies the "log-sum-exp trick" in a different + way. + + More precisely, where `tf.math.reduce_logsumexp` uses the following trick: + + ``` + log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x) + ``` + + it cannot be directly used here as there is no fast way of applying it + to each prefix `x[:i]`. Instead, this function implements a prefix + scan using pairwise log-add-exp, which is a commutative and associative + (up to floating point precision) operator: + + ``` + log_add_exp(x, y) = log(exp(x) + exp(y)) + = log(1 + exp(min(x, y) - max(x, y))) + max(x, y) + ``` + + However, reducing using the above operator leads to a different computation + tree (logs are taken repeatedly instead of only at the end), and the maximum + is only computed pairwise instead of over the entire prefix. In general, this + leads to a different and slightly less precise computation. + + Args: + x: A `Tensor`. Must be one of the following types: `float16`, `float32`, + `float64`. 
+ axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the + range `[-rank(x), rank(x))`. + exclusive: If `True`, perform exclusive cumulative log-sum-exp. + reverse: If `True`, performs the cumulative log-sum-exp in the reverse + direction. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same shape and type as `x`. + """ + with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.cumulative_logsumexp( + x, axis, exclusive=exclusive, reverse=reverse, name=name) + + +@tf_export("math.conj", v1=["math.conj", "conj"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("conj") +def conj(x, name=None): + r"""Returns the complex conjugate of a complex number. + + Given a tensor `x` of complex numbers, this operation returns a tensor of + complex numbers that are the complex conjugate of each element in `x`. The + complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the + real part and `b` is the imaginary part. + + The complex conjugate returned by this operation is of the form \\(a - bj\\). + + For example: + + >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) + >>> tf.math.conj(x) + + + If `x` is real, it is returned unchanged. + + For example: + + >>> x = tf.constant([-2.25, 3.25]) + >>> tf.math.conj(x) + + + Args: + x: `Tensor` to conjugate. Must have numeric or variant type. + name: A name for the operation (optional). + + Returns: + A `Tensor` that is the conjugate of `x` (with the same type). + + Raises: + TypeError: If `x` is not a numeric tensor. + + @compatibility(numpy) + Equivalent to numpy.conj. 
+ @end_compatibility + """ + if isinstance(x, tensor_lib.Tensor): + dt = x.dtype + if dt.is_floating or dt.is_integer: + return x + with ops.name_scope(name, "Conj", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_complex or x.dtype == dtypes.variant: + return gen_math_ops.conj(x, name=name) + elif x.dtype.is_floating or x.dtype.is_integer: + return x + else: + raise TypeError( + f"Expected numeric or variant tensor, got dtype {x.dtype!r}.") + + +def reduced_shape(input_shape, axes): + """Helper function for reduction ops. + + Args: + input_shape: 1-D Tensor, the shape of the Tensor being reduced. + axes: 1-D Tensor, the reduction axes. + + Returns: + A 1-D Tensor, the output shape as if keepdims were set to True. + """ + # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to + # `input_shape` rather than `tf.shape` of it. Then we can check if the shape + # is fully defined here, which may be faster executing eagerly than running + # `tf.shape` and then fetching its constant value. 
+ constant_input_shape = tensor_util.constant_value(input_shape) + if constant_input_shape is not None: + constant_axes = tensor_util.constant_value(axes) + if constant_axes is not None: + constant_axes = np.array(constant_axes, dtype=np.int32) + constant_input_shape = np.array(constant_input_shape, dtype=np.int32) + constant_input_shape[constant_axes] = 1 + return constant_input_shape + + # Example: + # cast needed for SparseTensor reductions + input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7] + axes = cast(axes, dtypes.int32) # [1, 2] + + input_rank = array_ops.size(input_shape) # 4 + axes = (axes + input_rank) % input_rank + axes_shape = array_ops.shape(axes) # [2] + return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7] + [ + range(input_rank), # [0, 1, 2, 3] + axes + ], # [1, 2] + [ + input_shape, # [2, 3, 5, 7] + array_ops.ones(axes_shape, dtype=dtypes.int32) + ]) # [1, 1] + + +def _unsorted_segment_N(data, segment_ids, num_segments): + """ Helper function for unsorted_segment_mean/_sqrtN. + + Computes the number + of segment entries with 0-entries set to 1 to allow division by N. 
+ """ + num_segments = ops.convert_to_tensor(num_segments) + # bincount doesn't support negative indices so we use unsorted_segment_sum + segment_ids_shape = array_ops.shape_internal(segment_ids) + ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype) + n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments) + # add dimensions for all non-reduced axes + broadcastable_shape = array_ops.concat( + [num_segments[array_ops.newaxis], + array_ops.ones([array_ops.rank(data) + - array_ops.rank(segment_ids)], + dtype=num_segments.dtype)], + axis=0) + n = array_ops.reshape(n, broadcastable_shape) + return gen_math_ops.maximum(n, 1) + + +@tf_export( + "math.unsorted_segment_mean", + v1=["math.unsorted_segment_mean", "unsorted_segment_mean"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("unsorted_segment_mean") +def unsorted_segment_mean(data, segment_ids, num_segments, name=None): + r"""Computes the mean along segments of a tensor. + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + This operator is similar to the `tf.math.unsorted_segment_sum` operator. + Instead of computing the sum over segments, it computes the mean of all + entries belonging to a segment such that: + + \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples + `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of + occurrences of id \\i\\. + + If there is no entry for a given segment ID `i`, it outputs 0. + + If the given segment ID `i` is negative, the value is dropped and will not + be added to the sum of the segment. + + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. 
On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + + Args: + data: A `Tensor` with floating point or complex dtype. + segment_ids: An integer tensor whose shape is a prefix of `data.shape`. + The values must be less than `num_segments`. + The values are always validated to be in range on CPU, + never validated on GPU. + num_segments: An integer scalar `Tensor`. The number of distinct segment + IDs. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` + dimensions, which are replaced with a single dimension which has size + `num_segments`. + """ + with ops.name_scope(name, "UnsortedSegmentMean"): + data = ops.convert_to_tensor(data) + segment_ids = ops.convert_to_tensor(segment_ids) + N = _unsorted_segment_N(data, segment_ids, num_segments) + summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments) + return summed / N + + +@tf_export( + "math.unsorted_segment_sqrt_n", + v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n") +def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None): + r"""Computes the sum along segments of a tensor divided by the sqrt(N). + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + This operator is similar to the `tf.math.unsorted_segment_sum` operator. + Additionally to computing the sum over segments, it divides the results by + sqrt(N). + + \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over + tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the + number of occurrences of id \\i\\. 
+ + If there is no entry for a given segment ID `i`, it outputs 0. + + Note that this op only supports floating point and complex dtypes, + due to tf.sqrt only supporting these types. + + If the given segment ID `i` is negative, the value is dropped and will not + be added to the sum of the segment. + + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + + Args: + data: A `Tensor` with floating point or complex dtype. + segment_ids: An integer tensor whose shape is a prefix of `data.shape`. + The values must be in the range `[0, num_segments)`. + The values are always validated to be in range on CPU, + never validated on GPU. + num_segments: An integer scalar `Tensor`. The number of distinct segment + IDs. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` + dimensions, which are replaced with a single dimension which has size + `num_segments`. + """ + with ops.name_scope(name, "UnsortedSegmentSqrtN"): + data = ops.convert_to_tensor(data) + segment_ids = ops.convert_to_tensor(segment_ids) + N = _unsorted_segment_N(data, segment_ids, num_segments) + summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments) + return summed / gen_math_ops.sqrt(N) + + +@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"]) +@deprecation.deprecated_endpoints("sparse_segment_sum") +def sparse_segment_sum( + data, + indices, + segment_ids, + name=None, + num_segments=None, + sparse_gradient=False, +): + r"""Computes the sum along sparse segments of a tensor. 
+ + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s + first dimension, selecting a subset of dimension 0, specified by `indices`. + `segment_ids` is allowed to have missing ids, in which case the output will + be zeros at those indices. In those cases `num_segments` is used to determine + the size of the output. + + For example: + + ```python + c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + + # Select two rows, one segment. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + # => [[0 0 0 0]] + + # Select two rows, two segment. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + # => [[ 1 2 3 4] + # [-1 -2 -3 -4]] + + # With missing segment ids. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), + num_segments=4) + # => [[ 1 2 3 4] + # [ 0 0 0 0] + # [-1 -2 -3 -4] + # [ 0 0 0 0]] + + # Select all rows, two segments. + tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + # => [[0 0 0 0] + # [5 6 7 8]] + + # Which is equivalent to: + tf.math.segment_sum(c, tf.constant([0, 0, 1])) + ``` + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + name: A name for the operation (optional). + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. + sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (`IndexedSlices`) instead of + dense (`Tensor`). The sparse gradient will contain one non-zero row for + each unique index in `indices`. 
+ + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + if num_segments is not None: + return gen_math_ops.sparse_segment_sum_with_num_segments( + data=data, + indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + name=name, + ) + else: + return gen_math_ops.sparse_segment_sum( + data=data, + indices=indices, + segment_ids=segment_ids, + sparse_gradient=sparse_gradient, + name=name, + ) + + +@tf_export("sparse.segment_sum", v1=[]) +def sparse_segment_sum_v2( + data, + indices, + segment_ids, + num_segments=None, + name=None, + sparse_gradient=False, +): + r"""Computes the sum along sparse segments of a tensor. + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s + first dimension, selecting a subset of dimension 0, specified by `indices`. + `segment_ids` is allowed to have missing ids, in which case the output will + be zeros at those indices. In those cases `num_segments` is used to determine + the size of the output. + + For example: + + ```python + c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + + # Select two rows, one segment. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + # => [[0 0 0 0]] + + # Select two rows, two segment. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + # => [[ 1 2 3 4] + # [-1 -2 -3 -4]] + + # With missing segment ids. + tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), + num_segments=4) + # => [[ 1 2 3 4] + # [ 0 0 0 0] + # [-1 -2 -3 -4] + # [ 0 0 0 0]] + + # Select all rows, two segments. 
+ tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + # => [[0 0 0 0] + # [5 6 7 8]] + + # Which is equivalent to: + tf.math.segment_sum(c, tf.constant([0, 0, 1])) + ``` + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. + name: A name for the operation (optional). + sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (`IndexedSlices`) instead of + dense (`Tensor`). The sparse gradient will contain one non-zero row for + each unique index in `indices`. + + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + return sparse_segment_sum( + data, + indices, + segment_ids, + name=name, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + ) + + +@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"]) +@deprecation.deprecated_endpoints("sparse_segment_mean") +def sparse_segment_mean( + data, + indices, + segment_ids, + name=None, + num_segments=None, + sparse_gradient=False, +): + r"""Computes the mean along sparse segments of a tensor. + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + Like `tf.math.segment_mean`, but `segment_ids` can have rank less than + `data`'s first dimension, selecting a subset of dimension 0, specified by + `indices`. + `segment_ids` is allowed to have missing ids, in which case the output will + be zeros at those indices. 
In those cases `num_segments` is used to determine + the size of the output. + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + name: A name for the operation (optional). + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. + sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (`IndexedSlices`) instead of + dense (`Tensor`). The sparse gradient will contain one non-zero row for + each unique index in `indices`. + + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + if num_segments is not None: + return gen_math_ops.sparse_segment_mean_with_num_segments( + data=data, + indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + name=name, + sparse_gradient=sparse_gradient, + ) + else: + return gen_math_ops.sparse_segment_mean( + data=data, + indices=indices, + segment_ids=segment_ids, + name=name, + sparse_gradient=sparse_gradient, + ) + + +@tf_export("sparse.segment_mean", v1=[]) +def sparse_segment_mean_v2( + data, + indices, + segment_ids, + num_segments=None, + name=None, + sparse_gradient=False, +): + r"""Computes the mean along sparse segments of a tensor. + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + Like `tf.math.segment_mean`, but `segment_ids` can have rank less than + `data`'s first dimension, selecting a subset of dimension 0, specified by + `indices`. 
+ `segment_ids` is allowed to have missing ids, in which case the output will + be zeros at those indices. In those cases `num_segments` is used to determine + the size of the output. + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. + name: A name for the operation (optional). + sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (`IndexedSlices`) instead of + dense (`Tensor`). The sparse gradient will contain one non-zero row for + each unique index in `indices`. + + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + return sparse_segment_mean( + data, + indices, + segment_ids, + name=name, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + ) + + +@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"]) +@deprecation.deprecated_endpoints("sparse_segment_sqrt_n") +def sparse_segment_sqrt_n( + data, + indices, + segment_ids, + name=None, + num_segments=None, + sparse_gradient=False, +): + r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N). + + `N` is the size of the segment being reduced. + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + name: A name for the operation (optional). + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. 
+ sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (IndexedSlices) instead of dense + (Tensor). + + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + if num_segments is not None: + return gen_math_ops.sparse_segment_sqrt_n_with_num_segments( + data=data, + indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + name=name, + sparse_gradient=sparse_gradient, + ) + else: + return gen_math_ops.sparse_segment_sqrt_n( + data=data, + indices=indices, + segment_ids=segment_ids, + name=name, + sparse_gradient=sparse_gradient, + ) + + +@tf_export("sparse.segment_sqrt_n", v1=[]) +def sparse_segment_sqrt_n_v2( + data, + indices, + segment_ids, + num_segments=None, + name=None, + sparse_gradient=False, +): + r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N). + + Read [the section on + segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) + for an explanation of segments. + + Like `tf.sparse.segment_mean`, but instead of dividing by the size of the + segment, `N`, divide by `sqrt(N)` instead. + + Args: + data: A `Tensor` with data that will be assembled in the output. + indices: A 1-D `Tensor` with indices into `data`. Has same rank as + `segment_ids`. + segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values + should be sorted and can be repeated. + num_segments: An optional int32 scalar. Indicates the size of the output + `Tensor`. + name: A name for the operation (optional). + sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the + gradient of this function will be sparse (`IndexedSlices`) instead of + dense (`Tensor`). The sparse gradient will contain one non-zero row for + each unique index in `indices`. 
+ + Returns: + A `tensor` of the shape as data, except for dimension 0 which + has size `k`, the number of segments specified via `num_segments` or + inferred for the last element in `segments_ids`. + """ + return sparse_segment_sqrt_n( + data, + indices, + segment_ids, + name=name, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + ) + + +@tf_export("tensordot", "linalg.tensordot") +@dispatch.add_dispatch_support +def tensordot(a, b, axes, name=None): + r"""Tensor contraction of a and b along specified axes and outer product. + + Tensordot (also known as tensor contraction) sums the product of elements + from `a` and `b` over the indices specified by `axes`. + + This operation corresponds to `numpy.tensordot(a, b, axes)`. + + Example 1: When `a` and `b` are matrices (order 2), the case `axes=1` + is equivalent to matrix multiplication. + + Example 2: When `a` and `b` are matrices (order 2), the case + `axes = [[1], [0]]` is equivalent to matrix multiplication. + + Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives + the outer product, a tensor of order 4. + + Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two + tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor + \\(c_{jklm}\\) whose entry + corresponding to the indices \\((j,k,l,m)\\) is given by: + + \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\). + + In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`. + + Args: + a: `Tensor` of type `float32` or `float64`. + b: `Tensor` with the same type as `a`. + axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k]. + If axes is a scalar, sum over the last N axes of a and the first N axes of + b in order. If axes is a list or `Tensor` the first and second row contain + the set of unique integers specifying axes along which the contraction is + computed, for `a` and `b`, respectively. The number of axes for `a` and + `b` must be equal. 
If `axes=0`, computes the outer product between `a` and + `b`. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same type as `a`. + + Raises: + ValueError: If the shapes of `a`, `b`, and `axes` are incompatible. + IndexError: If the values in axes exceed the rank of the corresponding + tensor. + """ + + def _tensordot_reshape(a, axes, flipped=False): + """Helper method to perform transpose and reshape for contraction op. + + This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` + using `array_ops.transpose` and `array_ops.reshape`. The method takes a + tensor and performs the correct transpose and reshape operation for a given + set of indices. It returns the reshaped tensor as well as a list of indices + necessary to reshape the tensor again after matrix multiplication. + + Args: + a: `Tensor`. + axes: List or `int32` `Tensor` of unique indices specifying valid axes of + `a`. + flipped: An optional `bool`. Defaults to `False`. If `True`, the method + assumes that `a` is the second argument in the contraction operation. 
+ + Returns: + A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is + the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is + either a list of integers or an `int32` `Tensor`, depending on whether + the shape of a is fully specified, and free_dims_static is either a list + of integers and None values, or None, representing the inferred + static shape of the free dimensions + """ + if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)): + shape_a = a.get_shape().as_list() + axes = [i if i >= 0 else i + len(shape_a) for i in axes] + free = [i for i in builtins.range(len(shape_a)) if i not in axes] + free_dims = [shape_a[i] for i in free] + prod_free = int(np.prod([shape_a[i] for i in free])) + prod_axes = int(np.prod([shape_a[i] for i in axes])) + perm = list(axes) + free if flipped else free + list(axes) + new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes] + if (perm != np.arange(len(shape_a))).any(): + a_trans = array_ops.transpose(a, perm) + else: + a_trans = a + if a_trans.get_shape().as_list() != new_shape: + reshaped_a = array_ops.reshape(a_trans, new_shape) + else: + reshaped_a = a_trans + return reshaped_a, free_dims, free_dims + else: + if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)): + shape_a = a.get_shape().as_list() + axes = [i if i >= 0 else i + len(shape_a) for i in axes] + free = [i for i in builtins.range(len(shape_a)) if i not in axes] + axes_dims = [shape_a[i] for i in axes] + free_dims = [shape_a[i] for i in free] + free_dims_static = free_dims + axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes") + free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free") + shape_a = array_ops.shape(a) + else: + free_dims_static = None + shape_a = array_ops.shape(a) + rank_a = array_ops.rank(a) + axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes") + axes = array_ops.where(axes >= 0, axes, axes + rank_a) + free, _ = 
gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32) + free_dims = array_ops.gather(shape_a, free) + axes_dims = array_ops.gather(shape_a, axes) + prod_free_dims = reduce_prod(free_dims) + prod_axes_dims = reduce_prod(axes_dims) + if flipped: + perm = array_ops.concat([axes, free], 0) + new_shape = array_ops_stack.stack([prod_axes_dims, prod_free_dims]) + else: + perm = array_ops.concat([free, axes], 0) + new_shape = array_ops_stack.stack([prod_free_dims, prod_axes_dims]) + reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape) + return reshaped_a, free_dims, free_dims_static + + def _tensordot_axes(a, axes): + """Generates two sets of contraction axes for the two tensor arguments.""" + a_shape = a.get_shape() + if isinstance(axes, compat.integral_types): + if axes < 0: + raise ValueError(f"`axes` must be at least 0. Received: {axes}.") + if a_shape.ndims is not None: + if axes > a_shape.ndims: + raise ValueError(f"`axes` must not be larger than the number of " + f"dimensions of tensor {a}. Received {axes}, vs " + f"tensor dimensions {a_shape.ndims}.") + return (list(builtins.range(a_shape.ndims - axes, + a_shape.ndims)), list(builtins.range(axes))) + else: + rank = array_ops.rank(a) + return (range(rank - axes, rank, + dtype=dtypes.int32), range(axes, dtype=dtypes.int32)) + elif isinstance(axes, (list, tuple)): + if len(axes) != 2: + raise ValueError( + f"`axes` must be an integer or have length 2. 
Received {axes}.") + a_axes = axes[0] + b_axes = axes[1] + if isinstance(a_axes, compat.integral_types) and \ + isinstance(b_axes, compat.integral_types): + a_axes = [a_axes] + b_axes = [b_axes] + if len(a_axes) != len(b_axes): + raise ValueError(f"Different number of contraction axes `a` and `b`, " + f"{len(a_axes)} != {len(b_axes)}.") + return a_axes, b_axes + else: + axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32) + return axes[0], axes[1] + + with ops.name_scope(name, "Tensordot", [a, b, axes]) as name: + a = ops.convert_to_tensor(a, name="a") + b = ops.convert_to_tensor(b, name="b") + a_axes, b_axes = _tensordot_axes(a, axes) + a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes) + b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape( + b, b_axes, True) + ab_matmul = matmul(a_reshape, b_reshape) + if isinstance(a_free_dims, list) and isinstance(b_free_dims, list): + if (ab_matmul.get_shape().is_fully_defined() and + ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims): + return ab_matmul + else: + return array_ops.reshape( + ab_matmul, a_free_dims + b_free_dims, name=name) + else: + a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32) + b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32) + product = array_ops.reshape( + ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name) + if a_free_dims_static is not None and b_free_dims_static is not None: + product.set_shape(a_free_dims_static + b_free_dims_static) + return product + + +@tf_export("math.polyval") +@dispatch.add_dispatch_support +def polyval(coeffs, x, name=None): + r"""Computes the elementwise value of a polynomial. + + If `x` is a tensor and `coeffs` is a list n + 1 tensors, + this function returns the value of the n-th order polynomial + + `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)` + + evaluated using Horner's method, i.e. 
+ + ```python + p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0])) + ``` + + Usage Example: + + >>> coefficients = [1.0, 2.5, -4.2] + >>> x = 5.0 + >>> y = tf.math.polyval(coefficients, x) + >>> y + + + Usage Example: + + >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0) + + + `tf.math.polyval` can also be used in polynomial regression. Taking + advantage of this function can facilitate writing a polynomial equation + as compared to explicitly writing it out, especially for higher degree + polynomials. + + >>> x = tf.constant(3) + >>> theta1 = tf.Variable(2) + >>> theta2 = tf.Variable(1) + >>> theta3 = tf.Variable(0) + >>> tf.math.polyval([theta1, theta2, theta3], x) + + + Args: + coeffs: A list of `Tensor` representing the coefficients of the polynomial. + x: A `Tensor` representing the variable of the polynomial. + name: A name for the operation (optional). + + Returns: + A `tensor` of the shape as the expression p(x) with usual broadcasting + rules for element-wise addition and multiplication applied. + + @compatibility(numpy) + Equivalent to numpy.polyval. + @end_compatibility + """ + if not isinstance(coeffs, list): + raise ValueError( + f"Argument coeffs must be list type. Received type {type(coeffs)}.") + + with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name: + x = ops.convert_to_tensor(x, name="x") + if len(coeffs) < 1: + return array_ops.zeros_like(x, name=name) + coeffs = [ + ops.convert_to_tensor(coeff, name=("coeff_%d" % index)) + for index, coeff in enumerate(coeffs) + ] + p = coeffs[0] + for c in coeffs[1:]: + p = c + p * x + return p + + +@tf_export("math.reciprocal_no_nan") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def reciprocal_no_nan(x, name=None): + """Performs a safe reciprocal operation, element wise. + + If a particular element is zero, the reciprocal for that element is + also set to zero. 
+ + For example: + ```python + x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32) + tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ] + ``` + + Args: + x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or + `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of same shape and type as `x`. + + Raises: + TypeError: x must be of a valid dtype. + + """ + + with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope: + x = ops.convert_to_tensor(x, name="x") + one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one") + return gen_math_ops.div_no_nan(one, x, name=scope) + + +@tf_export("math.xdivy") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def xdivy(x, y, name=None): + """Computes `x / y`. + + Given `x` and `y`, computes `x / y`. This function safely returns + zero when `x = 0`, no matter what the value of `y` is. + + Example: + + >>> tf.math.xdivy(1., 2.) + + >>> tf.math.xdivy(0., 1.) + + >>> tf.math.xdivy(0., 0.) + + >>> tf.math.xdivy(1., 0.) + + + Args: + x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`, + `complex128` + y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`, + `complex128` + name: A name for the operation (optional). + + Returns: + `x / y`. + """ + with ops.name_scope(name, "xdivy", [x]): + return gen_math_ops.xdivy(x, y) + + +@tf_export("math.xlog1py") +@dispatch.register_binary_elementwise_api +@dispatch.add_dispatch_support +def xlog1py(x, y, name=None): + r"""Compute x * log1p(y). + + Given `x` and `y`, compute `x * log1p(y)`. This function safely returns + zero when `x = 0`, no matter what the value of `y` is. + + Example: + + >>> tf.math.xlog1py(0., 1.) + + >>> tf.math.xlog1py(1., 1.) + + >>> tf.math.xlog1py(2., 2.) + + >>> tf.math.xlog1py(0., -1.) 
+ + + Args: + x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`, + `complex128` + y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`, + `complex128` + name: A name for the operation (optional). + + Returns: + `x * log1p(y)`. + + @compatibility(scipy) + Equivalent to scipy.special.xlog1py + @end_compatibility + """ + with ops.name_scope(name, "xlog1py", [x]): + return gen_math_ops.xlog1py(x, y) + + +@tf_export("math.erfinv") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def erfinv(x, name=None): + """Compute inverse error function. + + Given `x`, compute the inverse error function of `x`. This function + is the inverse of `tf.math.erf`. + + Args: + x: `Tensor` with type `float` or `double`. + name: A name for the operation (optional). + Returns: + Inverse error function of `x`. + """ + with ops.name_scope(name, "erfinv", [x]): + return gen_math_ops.erfinv(x) + + +@tf_export("math.ndtri") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def ndtri(x, name=None): + """Compute quantile of Standard Normal. + + Args: + x: `Tensor` with type `float` or `double`. + name: A name for the operation (optional). + Returns: + Inverse error function of `x`. + """ + with ops.name_scope(name, "ndtri", [x]): + return gen_math_ops.ndtri(x) + + +@tf_export("math.erfcinv") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def erfcinv(x, name=None): + """Computes the inverse of complementary error function. + + Given `x`, compute the inverse complementary error function of `x`. + This function is the inverse of `tf.math.erfc`, and is defined on + `[0, 2]`. + + >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.]) + + + Args: + x: `Tensor` with type `float` or `double`. + name: A name for the operation (optional). + Returns: + Inverse complementary error function of `x`. 
+ + @compatibility(numpy) + Equivalent to scipy.special.erfcinv + @end_compatibility + """ + with ops.name_scope(name, "erfcinv", [x]): + x = ops.convert_to_tensor(x, name="start") + return -ndtri(0.5 * x) * np.sqrt(0.5) + + +@tf_export("math.ceil", v1=["math.ceil", "ceil"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("ceil") +def ceil(x, name=None): + """Return the ceiling of the input, element-wise. + + For example: + + >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) + + + Args: + x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, + `float32`, `float64`. `int32` + name: A name for the operation (optional). + + Returns: + A `tf.Tensor`. Has the same type as `x`. + + @compatibility(numpy) + Equivalent to np.ceil + @end_compatibility + """ + return gen_math_ops.ceil(x, name) + + +@tf_export("math.sqrt", "sqrt") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def sqrt(x, name=None): # pylint: disable=redefined-builtin + r"""Computes element-wise square root of the input tensor. + + Note: This operation does not support integer types. + + >>> x = tf.constant([[4.0], [16.0]]) + >>> tf.sqrt(x) + + >>> y = tf.constant([[-4.0], [16.0]]) + >>> tf.sqrt(y) + + >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128) + >>> tf.sqrt(z) + + + Note: In order to support complex type, please provide an input tensor + of `complex64` or `complex128`. + + Args: + x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`, + `complex64`, `complex128` + name: A name for the operation (optional). + + Returns: + A `tf.Tensor` of same size, type and sparsity as `x`. + """ + return gen_math_ops.sqrt(x, name) + + +# pylint: disable=g-docstring-has-escape +@tf_export("math.exp", "exp") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def exp(x, name=None): + r"""Computes exponential of x element-wise. \\(y = e^x\\). 
+ + This function computes the exponential of the input tensor element-wise. + i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor. + \\(e\\) denotes Euler's number and is approximately equal to 2.718281. + Output is positive for any real input. + + >>> x = tf.constant(2.0) + >>> tf.math.exp(x) + + + >>> x = tf.constant([2.0, 8.0]) + >>> tf.math.exp(x) + + + For complex numbers, the exponential value is calculated as + $$ + e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)}) + $$ + + For `1+1j` the value would be computed as: + $$ + e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j) + $$ + + >>> x = tf.constant(1 + 1j) + >>> tf.math.exp(x) + + + Args: + x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, + `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor`. Has the same type as `x`. + + @compatibility(numpy) + Equivalent to np.exp + @end_compatibility + """ + return gen_math_ops.exp(x, name) + + +# pylint: enable=g-docstring-has-escape + + +@tf_export("math.sobol_sample") +@dispatch.add_dispatch_support +def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None): + """Generates points from the Sobol sequence. + + Creates a Sobol sequence with `num_results` samples. Each sample has dimension + `dim`. Skips the first `skip` samples. + + Args: + dim: Positive scalar `Tensor` representing each sample's dimension. + num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol + points to return in the output. + skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of + initial points of the Sobol sequence to skip. Default value is 0. + dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or + `tf.float64`. Defaults to `tf.float32`. + name: (Optional) Python `str` name prefixed to ops created by this function. 
+ + Returns: + `Tensor` of samples from Sobol sequence with `shape` [num_results, dim]. + """ + with ops.name_scope(name, "sobol", [dim, num_results, skip]): + return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype) + + +@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("rsqrt") +def rsqrt(x, name=None): + """Computes reciprocal of square root of x element-wise. + + For example: + + >>> x = tf.constant([2., 0., -2.]) + >>> tf.math.rsqrt(x) + + + Args: + x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor`. Has the same type as `x`. + """ + return gen_math_ops.rsqrt(x, name) + + +@tf_export("math.acos", "acos") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def acos(x, name=None): + """Computes acos of x element-wise. + + Provided an input tensor, the `tf.math.acos` operation + returns the inverse cosine of each element of the tensor. + If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + + Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + + For example: + + >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32) + >>> tf.math.acos(x) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, + `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as x. + """ + return gen_math_ops.acos(x, name) + + +@tf_export("math.floor", "floor") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def floor(x, name=None): + """Returns element-wise largest integer not greater than x. + + Both input range is `(-inf, inf)` and the + output range consists of all integer values. 
+ + For example: + + >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")]) + >>> tf.floor(x).numpy() + array([ 1., -2., 5., -3., 0., inf], dtype=float32) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as x. + """ + return gen_math_ops.floor(x, name) + + +# Register elementwise ops that don't have Python wrappers. +# Binary elementwise ops. +dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_and) +dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_or) +dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_xor) +dispatch.register_binary_elementwise_api(gen_bitwise_ops.left_shift) +dispatch.register_binary_elementwise_api(gen_bitwise_ops.right_shift) +dispatch.register_unary_elementwise_api(gen_bitwise_ops.invert) +dispatch.register_binary_elementwise_api(gen_math_ops.atan2) +dispatch.register_binary_elementwise_api(gen_math_ops.floor_div) +dispatch.register_binary_elementwise_api(gen_math_ops.floor_mod) +dispatch.register_binary_elementwise_api(gen_math_ops.greater) +dispatch.register_binary_elementwise_api(gen_math_ops.greater_equal) +dispatch.register_binary_elementwise_api(gen_math_ops.less) +dispatch.register_binary_elementwise_api(gen_math_ops.less_equal) +dispatch.register_binary_elementwise_api(gen_math_ops.logical_and) +dispatch.register_binary_elementwise_api(gen_math_ops.logical_or) +dispatch.register_binary_elementwise_api(gen_math_ops.maximum) +dispatch.register_binary_elementwise_api(gen_math_ops.minimum) +dispatch.register_binary_elementwise_api(gen_math_ops.real_div) +dispatch.register_binary_elementwise_api(gen_math_ops.squared_difference) +dispatch.register_binary_elementwise_api(gen_math_ops.truncate_div) +dispatch.register_binary_elementwise_api(gen_math_ops.truncate_mod) +dispatch.register_binary_elementwise_api(gen_math_ops.xlogy) 
+dispatch.register_binary_elementwise_api(gen_math_ops.zeta) + +# Unary elementwise ops. +dispatch.register_unary_elementwise_api(gen_math_ops.acosh) +dispatch.register_unary_elementwise_api(gen_math_ops.asin) +dispatch.register_unary_elementwise_api(gen_math_ops.asinh) +dispatch.register_unary_elementwise_api(gen_math_ops.atan) +dispatch.register_unary_elementwise_api(gen_math_ops.atanh) +dispatch.register_unary_elementwise_api(gen_math_ops.cos) +dispatch.register_unary_elementwise_api(gen_math_ops.cosh) +dispatch.register_unary_elementwise_api(gen_math_ops.digamma) +dispatch.register_unary_elementwise_api(gen_math_ops.erf) +dispatch.register_unary_elementwise_api(gen_math_ops.erfc) +dispatch.register_unary_elementwise_api(gen_math_ops.expm1) +dispatch.register_unary_elementwise_api(gen_math_ops.is_finite) +dispatch.register_unary_elementwise_api(gen_math_ops.is_inf) +dispatch.register_unary_elementwise_api(gen_math_ops.is_nan) +dispatch.register_unary_elementwise_api(gen_math_ops.lgamma) +dispatch.register_unary_elementwise_api(gen_math_ops.log) +dispatch.register_unary_elementwise_api(gen_math_ops.log1p) +dispatch.register_unary_elementwise_api(gen_math_ops.logical_not) +dispatch.register_unary_elementwise_api(gen_math_ops.neg) +dispatch.register_unary_elementwise_api(gen_math_ops.next_after) +dispatch.register_unary_elementwise_api(gen_math_ops.reciprocal) +dispatch.register_unary_elementwise_api(gen_math_ops.rint) +dispatch.register_unary_elementwise_api(gen_math_ops.sin) +dispatch.register_unary_elementwise_api(gen_math_ops.sinh) +dispatch.register_unary_elementwise_api(gen_math_ops.square) +dispatch.register_unary_elementwise_api(gen_math_ops.tan) +dispatch.register_unary_elementwise_api(gen_math_ops.tanh) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn.py new file mode 100644 index 
0000000000000000000000000000000000000000..753f1708494d6358deaa2b9668249c11eaffc14d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn.py @@ -0,0 +1,42 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +# pylint: disable=unused-import,g-bad-import-order +"""Neural network support. + +See the [Neural network](https://tensorflow.org/api_guides/python/nn) guide. +""" +import sys as _sys + +# pylint: disable=unused-import +from tensorflow.python.ops import ctc_ops as _ctc_ops +from tensorflow.python.ops import embedding_ops as _embedding_ops +from tensorflow.python.ops import nn_grad as _nn_grad +from tensorflow.python.ops import nn_fused_batch_norm_grad as _nn_fused_batch_norm_grad +from tensorflow.python.ops import nn_ops as _nn_ops +from tensorflow.python.ops.math_ops import sigmoid +from tensorflow.python.ops.math_ops import tanh +# pylint: enable=unused-import + +# Bring more nn-associated functionality into this package. 
+# go/tf-wildcard-import +# pylint: disable=wildcard-import,unused-import +from tensorflow.python.ops.ctc_ops import * +from tensorflow.python.ops.nn_impl import * +from tensorflow.python.ops.nn_impl_distribute import * +from tensorflow.python.ops.nn_ops import * +from tensorflow.python.ops.candidate_sampling_ops import * +from tensorflow.python.ops.embedding_ops import * +# pylint: enable=wildcard-import,unused-import diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b2f624e106bf931a85300c3f76a859678bc46f58 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_ops.py @@ -0,0 +1,6697 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Primitive Neural Net (NN) Operations. + +## Notes on padding + +Several neural network operations, such as `tf.nn.conv2d` and +`tf.nn.max_pool2d`, take a `padding` parameter, which controls how the input is +padded before running the operation. The input is padded by inserting values +(typically zeros) before and after the tensor in each spatial dimension. 
The +`padding` parameter can either be the string `'VALID'`, which means use no +padding, or `'SAME'` which adds padding according to a formula which is +described below. Certain ops also allow the amount of padding per dimension to +be explicitly specified by passing a list to `padding`. + +In the case of convolutions, the input is padded with zeros. In case of pools, +the padded input values are ignored. For example, in a max pool, the sliding +window ignores padded values, which is equivalent to the padded values being +`-infinity`. + +### `'VALID'` padding + +Passing `padding='VALID'` to an op causes no padding to be used. This causes the +output size to typically be smaller than the input size, even when the stride is +one. In the 2D case, the output size is computed as: + +```python +out_height = ceil((in_height - filter_height + 1) / stride_height) +out_width = ceil((in_width - filter_width + 1) / stride_width) +``` + +The 1D and 3D cases are similar. Note `filter_height` and `filter_width` refer +to the filter size after dilations (if any) for convolutions, and refer to the +window size for pools. + +### `'SAME'` padding + +With `'SAME'` padding, padding is applied to each spatial dimension. When the +strides are 1, the input is padded such that the output size is the same as the +input size. In the 2D case, the output size is computed as: + +```python +out_height = ceil(in_height / stride_height) +out_width = ceil(in_width / stride_width) +``` + +The amount of padding used is the smallest amount that results in the output +size. 
The formula for the total amount of padding per dimension is: + +```python +if (in_height % strides[1] == 0): + pad_along_height = max(filter_height - stride_height, 0) +else: + pad_along_height = max(filter_height - (in_height % stride_height), 0) +if (in_width % strides[2] == 0): + pad_along_width = max(filter_width - stride_width, 0) +else: + pad_along_width = max(filter_width - (in_width % stride_width), 0) +``` + +Finally, the padding on the top, bottom, left and right are: + +```python +pad_top = pad_along_height // 2 +pad_bottom = pad_along_height - pad_top +pad_left = pad_along_width // 2 +pad_right = pad_along_width - pad_left +``` + +Note that the division by 2 means that there might be cases when the padding on +both sides (top vs bottom, right vs left) are off by one. In this case, the +bottom and right sides always get the one additional padded pixel. For example, +when pad_along_height is 5, we pad 2 pixels at the top and 3 pixels at the +bottom. Note that this is different from existing libraries such as PyTorch and +Caffe, which explicitly specify the number of padded pixels and always pad the +same number of pixels on both sides. + +Here is an example of `'SAME'` padding: + +>>> in_height = 5 +>>> filter_height = 3 +>>> stride_height = 2 +>>> +>>> in_width = 2 +>>> filter_width = 2 +>>> stride_width = 1 +>>> +>>> inp = tf.ones((2, in_height, in_width, 2)) +>>> filter = tf.ones((filter_height, filter_width, 2, 2)) +>>> strides = [stride_height, stride_width] +>>> output = tf.nn.conv2d(inp, filter, strides, padding='SAME') +>>> output.shape[1] # output_height: ceil(5 / 2) +3 +>>> output.shape[2] # output_width: ceil(2 / 1) +2 + +### Explicit padding + +Certain ops, like `tf.nn.conv2d`, also allow a list of explicit padding amounts +to be passed to the `padding` parameter. This list is in the same format as what +is passed to `tf.pad`, except the padding must be a nested list, not a tensor. 
+For example, in the 2D case, the list is in the format `[[0, 0], [pad_top, +pad_bottom], [pad_left, pad_right], [0, 0]]` when `data_format` is its default +value of `'NHWC'`. The two `[0, 0]` pairs indicate the batch and channel +dimensions have no padding, which is required, as only spatial dimensions can +have padding. + +For example: + +>>> inp = tf.ones((1, 3, 3, 1)) +>>> filter = tf.ones((2, 2, 1, 1)) +>>> strides = [1, 1] +>>> padding = [[0, 0], [1, 2], [0, 1], [0, 0]] +>>> output = tf.nn.conv2d(inp, filter, strides, padding=padding) +>>> tuple(output.shape) +(1, 5, 3, 1) +>>> # Equivalently, tf.pad can be used, since convolutions pad with zeros. +>>> inp = tf.pad(inp, padding) +>>> # 'VALID' means to use no padding in conv2d (we already padded inp) +>>> output2 = tf.nn.conv2d(inp, filter, strides, padding='VALID') +>>> tf.debugging.assert_equal(output, output2) + +### Difference between convolution and pooling layers +How padding is used in convolution layers and pooling layers is different. For +convolution layers, padding is filled with values of zero, and padding is +multiplied with kernels. For pooling layers, padding is excluded from the +computation. For example when applying average pooling to a 4x4 grid, how much +padding is added will not impact the output. Here is an example that +demonstrates the difference. + +>>> x_in = np.array([[ +... [[2], [2]], +... [[1], [1]], +... [[1], [1]]]]) +>>> kernel_in = np.array([ # simulate the avg_pool with conv2d +... [ [[0.25]], [[0.25]] ], +... [ [[0.25]], [[0.25]] ]]) +>>> x = tf.constant(x_in, dtype=tf.float32) +>>> kernel = tf.constant(kernel_in, dtype=tf.float32) +>>> conv_out = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME') +>>> pool_out = tf.nn.avg_pool(x, [2, 2], strides=[1, 1, 1, 1], padding='SAME') +>>> print(conv_out.shape, pool_out.shape) +(1, 3, 2, 1) (1, 3, 2, 1) +>>> tf.reshape(conv_out, [3, 2]).numpy() # conv2d takes account of padding +array([[1.5 , 0.75], + [1. 
, 0.5 ], + [0.5 , 0.25]], dtype=float32) +>>> tf.reshape(pool_out, [3, 2]).numpy() # avg_pool excludes padding +array([[1.5, 1.5], + [1. , 1. ], + [1. , 1. ]], dtype=float32) + +API docstring: tensorflow.nn +""" + +import functools +import numbers + +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import config +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl +from tensorflow.python.framework import graph_util +from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_nn_ops +from tensorflow.python.ops import math_ops +# Ensure all gradients are registered for nn_ops +from tensorflow.python.ops import nn_grad # pylint: disable=unused-import +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import stateless_random_ops +from tensorflow.python.ops import variables as variables_lib +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_nn_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.platform import device_context +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util.compat import collections_abc +from tensorflow.python.util.deprecation import deprecated_args +from tensorflow.python.util.deprecation import deprecated_argument_lookup + +from tensorflow.python.util.tf_export import tf_export + +# Aliases for some automatically-generated 
names. +local_response_normalization = gen_nn_ops.lrn + +# pylint: disable=protected-access +# pylint: disable=g-classes-have-attributes + +# Acceptable channels last formats (robust to H, W, D order). +_CHANNELS_LAST_FORMATS = frozenset({ + "NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC", + "NWHDC" +}) + + +def _get_sequence(value, n, channel_index, name): + """Formats a value input for gen_nn_ops.""" + # Performance is fast-pathed for common cases: + # `None`, `list`, `tuple` and `int`. + if value is None: + return [1] * (n + 2) + + # Always convert `value` to a `list`. + if isinstance(value, list): + pass + elif isinstance(value, tuple): + value = list(value) + elif isinstance(value, int): + value = [value] + elif not isinstance(value, collections_abc.Sized): + value = [value] + else: + value = list(value) # Try casting to a list. + + len_value = len(value) + + # Fully specified, including batch and channel dims. + if len_value == n + 2: + return value + + # Apply value to spatial dims only. + if len_value == 1: + value = value * n # Broadcast to spatial dimensions. + elif len_value != n: + raise ValueError(f"{name} should be of length 1, {n} or {n + 2}. " + f"Received: {name}={value} of length {len_value}") + + # Add batch and channel dims (always 1). + if channel_index == 1: + return [1, 1] + value + else: + return [1] + value + [1] + + +def _non_atrous_convolution( + input, # pylint: disable=redefined-builtin + filter, # pylint: disable=redefined-builtin + padding, + data_format=None, # pylint: disable=redefined-builtin + strides=None, + name=None): + """Computes sums of N-D convolutions (actually cross correlation). + + It is required that 1 <= N <= 3. + + This is used to implement the more generic `convolution` function, which + extends the interface of this function with a `dilation_rate` parameter. 
+ + Args: + + input: Rank N+2 tensor of type T of shape + `[batch_size] + input_spatial_shape + [in_channels]` if `data_format` + does not start with `"NC"`, or + `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts + with `"NC"`. + filter: Rank N+2 tensor of type T of shape + `filter_spatial_shape + [in_channels, out_channels]`. Rank of either + `input` or `filter` must be known. + padding: Padding method to use, must be either "VALID" or "SAME". + data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + strides: Sequence of N positive integers, defaults to `[1] * N`. + name: Name prefix to use. + + Returns: + Rank N+2 tensor of type T of shape + `[batch_size] + output_spatial_shape + [out_channels]`, where + if padding == "SAME": + output_spatial_shape = input_spatial_shape + if padding == "VALID": + output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1. + + Raises: + ValueError: if ranks are incompatible. + + """ + with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope: + input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin + input_shape = input.shape + filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin + filter_shape = filter.shape + op = _NonAtrousConvolution( + input_shape, + filter_shape=filter_shape, + padding=padding, + data_format=data_format, + strides=strides, + name=scope) + return op(input, filter) + + +class _NonAtrousConvolution: + """Helper class for _non_atrous_convolution. 
+ + Note that this class assumes that shapes of input and filter passed to + `__call__` are compatible with `input_shape` and filter_shape passed to the + constructor. + + Args: + input_shape: static input shape, i.e. input.shape. + filter_shape: static filter shape, i.e. filter.shape. + padding: see _non_atrous_convolution. + data_format: see _non_atrous_convolution. + strides: see _non_atrous_convolution. + name: see _non_atrous_convolution. + num_batch_dims: (Optional.) The number of batch dimensions in the input; + if not provided, the default of `1` is used. + """ + + def __init__( + self, + input_shape, + filter_shape, + padding, + data_format=None, + strides=None, + name=None, + num_batch_dims=1): + # filter shape is always rank num_spatial_dims + 2 + # and num_spatial_dims == input_shape.ndims - num_batch_dims - 1 + if input_shape.ndims is not None: + filter_shape = filter_shape.with_rank( + input_shape.ndims - num_batch_dims + 1) + self.padding = padding + self.name = name + # input shape is == num_spatial_dims + num_batch_dims + 1 + # and filter_shape is always rank num_spatial_dims + 2 + if filter_shape.ndims is not None: + input_shape = input_shape.with_rank( + filter_shape.ndims + num_batch_dims - 1) + if input_shape.ndims is None: + raise ValueError( + "Rank of convolution must be known. " + f"Received: input_shape={input_shape} of rank {input_shape.rank}") + if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5: + raise ValueError( + "`input_shape.rank - num_batch_dims + 1` must be at least 3 and at " + f"most 5. Received: input_shape.rank={input_shape.rank} and " + f"num_batch_dims={num_batch_dims}") + conv_dims = input_shape.ndims - num_batch_dims - 1 + if strides is None: + strides = [1] * conv_dims + elif len(strides) != conv_dims: + raise ValueError( + f"`len(strides)` should be {conv_dims}. 
" + f"Received: strides={strides} of length {len(strides)}") + if conv_dims == 1: + # conv1d uses the 2-d data format names + if data_format is None: + data_format = "NWC" + elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}: + raise ValueError("`data_format` must be 'NWC' or 'NCW'. " + f"Received: data_format={data_format}") + self.strides = strides[0] + self.data_format = data_format + self.conv_op = self._conv1d + elif conv_dims == 2: + if data_format is None or data_format == "NHWC": + data_format = "NHWC" + strides = [1] + list(strides) + [1] + elif data_format == "NCHW": + strides = [1, 1] + list(strides) + else: + raise ValueError("`data_format` must be 'NHWC' or 'NCHW'. " + f"Received: data_format={data_format}") + self.strides = strides + self.data_format = data_format + self.conv_op = conv2d + elif conv_dims == 3: + if data_format is None or data_format == "NDHWC": + strides = [1] + list(strides) + [1] + elif data_format == "NCDHW": + strides = [1, 1] + list(strides) + else: + raise ValueError("`data_format` must be 'NDHWC' or 'NCDHW'. " + f"Received: data_format={data_format}") + self.strides = strides + self.data_format = data_format + self.conv_op = _conv3d_expanded_batch + + # Note that we need this adapter since argument names for conv1d don't match + # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d. + # pylint: disable=redefined-builtin + def _conv1d(self, input, filter, strides, padding, data_format, name): + return conv1d( + value=input, + filters=filter, + stride=strides, + padding=padding, + data_format=data_format, + name=name) + # pylint: enable=redefined-builtin + + def __call__(self, inp, filter): # pylint: disable=redefined-builtin + return self.conv_op( + input=inp, + filter=filter, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + name=self.name) + + +def squeeze_batch_dims(inp, op, inner_rank, name=None): + """Returns `unsqueeze_batch(op(squeeze_batch(inp)))`. 
+ + Where `squeeze_batch` reshapes `inp` to shape + `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]` + and `unsqueeze_batch` does the reverse reshape but on the output. + + Args: + inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape` + is length `inner_rank`. + op: A callable that takes a single input tensor and returns a single. + output tensor. + inner_rank: A python integer. + name: A string. + + Returns: + `unsqueeze_batch_op(squeeze_batch(inp))`. + """ + with ops.name_scope(name, "squeeze_batch_dims", [inp]): + inp = ops.convert_to_tensor(inp, name="input") + shape = inp.shape + + inner_shape = shape[-inner_rank:] + if not inner_shape.is_fully_defined(): + inner_shape = array_ops.shape(inp)[-inner_rank:] + + batch_shape = shape[:-inner_rank] + if not batch_shape.is_fully_defined(): + batch_shape = array_ops.shape(inp)[:-inner_rank] + + if isinstance(inner_shape, tensor_shape.TensorShape): + inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list()) + else: + inp_reshaped = array_ops.reshape( + inp, array_ops.concat(([-1], inner_shape), axis=-1)) + + out_reshaped = op(inp_reshaped) + + out_inner_shape = out_reshaped.shape[-inner_rank:] + if not out_inner_shape.is_fully_defined(): + out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:] + + out = array_ops.reshape( + out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1)) + + out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:]) + return out + + +@tf_export("nn.dilation2d", v1=[]) +@dispatch.add_dispatch_support +def dilation2d_v2( + input, # pylint: disable=redefined-builtin + filters, # pylint: disable=redefined-builtin + strides, + padding, + data_format, + dilations, + name=None): + """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors. 
+ + The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + input channel is processed independently of the others with its own + structuring function. The `output` tensor has shape + `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + tensor depend on the `padding` algorithm. We currently only support the + default "NHWC" `data_format`. + + In detail, the grayscale morphological 2-D dilation is the max-sum correlation + (for consistency with `conv2d`, we use unmirrored filters): + + output[b, y, x, c] = + max_{dy, dx} input[b, + strides[1] * y + rates[1] * dy, + strides[2] * x + rates[2] * dx, + c] + + filters[dy, dx, c] + + Max-pooling is a special case when the filter has size equal to the pooling + kernel size and contains all zeros. + + Note on duality: The dilation of `input` by the `filters` is equal to the + negation of the erosion of `-input` by the reflected `filters`. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, + `uint32`, `uint64`. + 4-D with shape `[batch, in_height, in_width, depth]`. + filters: A `Tensor`. Must have the same type as `input`. + 3-D with shape `[filter_height, filter_width, depth]`. + strides: A list of `ints` that has length `>= 4`. + The stride of the sliding window for each dimension of the input + tensor. Must be: `[1, stride_height, stride_width, 1]`. + padding: A `string` from: `"SAME", "VALID"`. + The type of padding algorithm to use. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A `string`, only `"NHWC"` is currently supported. + dilations: A list of `ints` that has length `>= 4`. + The input stride for atrous morphological dilation. Must be: + `[1, rate_height, rate_width, 1]`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + if data_format != "NHWC": + raise ValueError("`data_format` values other than 'NHWC' are not " + f"supported. Received: data_format={data_format}") + + return gen_nn_ops.dilation2d(input=input, + filter=filters, + strides=strides, + rates=dilations, + padding=padding, + name=name) + + +@tf_export(v1=["nn.dilation2d"]) +@dispatch.add_dispatch_support +def dilation2d_v1( # pylint: disable=missing-docstring + input, # pylint: disable=redefined-builtin + filter=None, # pylint: disable=redefined-builtin + strides=None, + rates=None, + padding=None, + name=None, + filters=None, + dilations=None): + filter = deprecated_argument_lookup("filters", filters, "filter", filter) + rates = deprecated_argument_lookup("dilations", dilations, "rates", rates) + return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name) + + +dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__ + + +@tf_export("nn.with_space_to_batch") +@dispatch.add_dispatch_support +def with_space_to_batch( + input, # pylint: disable=redefined-builtin + dilation_rate, + padding, + op, + filter_shape=None, + spatial_dims=None, + data_format=None): + """Performs `op` on the space-to-batch representation of `input`. + + This has the effect of transforming sliding window operations into the + corresponding "atrous" operation in which the input is sampled at the + specified `dilation_rate`. 
+ + In the special case that `dilation_rate` is uniformly 1, this simply returns: + + op(input, num_spatial_dims, padding) + + Otherwise, it returns: + + batch_to_space_nd( + op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings), + num_spatial_dims, + "VALID") + adjusted_dilation_rate, + adjusted_crops), + + where: + + adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)], + adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2] + + defined as follows: + + We first define two int64 tensors `paddings` and `crops` of shape + `[num_spatial_dims, 2]` based on the value of `padding` and the spatial + dimensions of the `input`: + + If `padding = "VALID"`, then: + + paddings, crops = required_space_to_batch_paddings( + input_shape[spatial_dims], + dilation_rate) + + If `padding = "SAME"`, then: + + dilated_filter_shape = + filter_shape + (filter_shape - 1) * (dilation_rate - 1) + + paddings, crops = required_space_to_batch_paddings( + input_shape[spatial_dims], + dilation_rate, + [(dilated_filter_shape - 1) // 2, + dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2]) + + Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial + dimensions are contiguous starting at the second dimension, but the specified + `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and + `crops` in order to be usable with these operations. For a given dimension, + if the block size is 1, and both the starting and ending padding and crop + amounts are 0, then space_to_batch_nd effectively leaves that dimension alone, + which is what is needed for dimensions not part of `spatial_dims`. + Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case + efficiently for any number of leading and trailing dimensions. 
+ + For 0 <= i < len(spatial_dims), we assign: + + adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i] + adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :] + adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :] + + All unassigned values of `adjusted_dilation_rate` default to 1, while all + unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0. + + Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID" + padding is equivalent to specifying `padding = "SAME"` with a filter_shape of + `[1]*N`. + + Advanced usage. Note the following optimization: A sequence of + `with_space_to_batch` operations with identical (not uniformly 1) + `dilation_rate` parameters and "VALID" padding + + net = with_space_to_batch(net, dilation_rate, "VALID", op_1) + ... + net = with_space_to_batch(net, dilation_rate, "VALID", op_k) + + can be combined into a single `with_space_to_batch` operation as follows: + + def combined_op(converted_input, num_spatial_dims, _): + result = op_1(converted_input, num_spatial_dims, "VALID") + ... + result = op_k(result, num_spatial_dims, "VALID") + + net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) + + This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and + `batch_to_space_nd`. + + Similarly, a sequence of `with_space_to_batch` operations with identical (not + uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter + dimensions + + net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1) + ... + net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k) + + can be combined into a single `with_space_to_batch` operation as follows: + + def combined_op(converted_input, num_spatial_dims, _): + result = op_1(converted_input, num_spatial_dims, "SAME") + ... 
+ result = op_k(result, num_spatial_dims, "SAME") + + net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) + + Args: + input: Tensor of rank > max(spatial_dims). + dilation_rate: int32 Tensor of *known* shape [num_spatial_dims]. + padding: str constant equal to "VALID" or "SAME" + op: Function that maps (input, num_spatial_dims, padding) -> output + filter_shape: If padding = "SAME", specifies the shape of the convolution + kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims]. + If padding = "VALID", filter_shape is ignored and need not be specified. + spatial_dims: Monotonically increasing sequence of `num_spatial_dims` + integers (which are >= 1) specifying the spatial dimensions of `input` + and output. Defaults to: `range(1, num_spatial_dims+1)`. + data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + + Returns: + The output Tensor as described above, dimensions will vary based on the op + provided. + + Raises: + ValueError: if `padding` is invalid or the arguments are incompatible. + ValueError: if `spatial_dims` are invalid. + """ + input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin + input_shape = input.shape + + def build_op(num_spatial_dims, padding): + return lambda inp, _: op(inp, num_spatial_dims, padding) + + new_op = _WithSpaceToBatch( + input_shape, + dilation_rate, + padding, + build_op, + filter_shape=filter_shape, + spatial_dims=spatial_dims, + data_format=data_format) + return new_op(input, None) + + +class _WithSpaceToBatch: + """Helper class for with_space_to_batch. 
+ + Note that this class assumes that shapes of input and filter passed to + `__call__` are compatible with `input_shape`, `filter_shape`, and + `spatial_dims` passed to the constructor. + + Arguments + input_shape: static shape of input. i.e. input.shape. + dilation_rate: see `with_space_to_batch`. + padding: see `with_space_to_batch`. + build_op: Function that maps (num_spatial_dims, paddings) -> (function that + maps (input, filter) -> output). + filter_shape: see `with_space_to_batch`. + spatial_dims: `see with_space_to_batch`. + data_format: see `with_space_to_batch`. + num_batch_dims: (Optional). Number of batch dims in `input_shape`. + """ + + def __init__(self, + input_shape, + dilation_rate, + padding, + build_op, + filter_shape=None, + spatial_dims=None, + data_format=None, + num_batch_dims=1): + """Helper class for _with_space_to_batch.""" + dilation_rate = ops.convert_to_tensor( + dilation_rate, dtypes.int32, name="dilation_rate") + if dilation_rate.shape.ndims not in (None, 1): + raise ValueError( + "`dilation_rate.shape.rank` must be 1. Received: " + f"dilation_rate={dilation_rate} of rank {dilation_rate.shape.rank}") + + if not dilation_rate.shape.is_fully_defined(): + raise ValueError( + "`dilation_rate.shape` must be fully defined. Received: " + f"dilation_rate={dilation_rate} with shape " + f"{dilation_rate.shape}") + + num_spatial_dims = dilation_rate.shape.dims[0].value + + if data_format is not None and data_format.startswith("NC"): + starting_spatial_dim = num_batch_dims + 1 + else: + starting_spatial_dim = num_batch_dims + + if spatial_dims is None: + spatial_dims = range(starting_spatial_dim, + num_spatial_dims + starting_spatial_dim) + orig_spatial_dims = list(spatial_dims) + spatial_dims = sorted(set(int(x) for x in orig_spatial_dims)) + if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims): + raise ValueError( + "`spatial_dims` must be a monotonically increasing sequence of " + f"positive integers. 
Received: spatial_dims={orig_spatial_dims}") + + if data_format is not None and data_format.startswith("NC"): + expected_input_rank = spatial_dims[-1] + else: + expected_input_rank = spatial_dims[-1] + 1 + + try: + input_shape.with_rank_at_least(expected_input_rank) + except ValueError: + raise ValueError( + f"`input.shape.rank` must be at least {expected_input_rank}. " + f"Received: input.shape={input_shape} with rank {input_shape.rank}") + + const_rate = tensor_util.constant_value(dilation_rate) + rate_or_const_rate = dilation_rate + if const_rate is not None: + rate_or_const_rate = const_rate + if np.any(const_rate < 1): + raise ValueError( + "`dilation_rate` must be positive. " + f"Received: dilation_rate={const_rate}") + if np.all(const_rate == 1): + self.call = build_op(num_spatial_dims, padding) + return + + padding, explicit_paddings = convert_padding(padding) + + # We have two padding contributions. The first is used for converting "SAME" + # to "VALID". The second is required so that the height and width of the + # zero-padded value tensor are multiples of rate. + + # Padding required to reduce to "VALID" convolution + if padding == "SAME": + if filter_shape is None: + raise ValueError( + "`filter_shape` must be specified for `padding='SAME'`. 
" + f"Received: filter_shape={filter_shape} and padding={padding}") + filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape") + const_filter_shape = tensor_util.constant_value(filter_shape) + if const_filter_shape is not None: + filter_shape = const_filter_shape + self.base_paddings = _with_space_to_batch_base_paddings( + const_filter_shape, num_spatial_dims, rate_or_const_rate) + else: + self.num_spatial_dims = num_spatial_dims + self.rate_or_const_rate = rate_or_const_rate + self.base_paddings = None + elif padding == "VALID": + self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32) + elif padding == "EXPLICIT": + base_paddings = (np.array(explicit_paddings) + .reshape([num_spatial_dims + 2, 2])) + # Remove batch and channel dimensions + if data_format is not None and data_format.startswith("NC"): + self.base_paddings = base_paddings[2:] + else: + self.base_paddings = base_paddings[1:-1] + else: + raise ValueError("`padding` must be one of 'SAME' or 'VALID'. " + f"Received: padding={padding}") + + self.input_shape = input_shape + self.spatial_dims = spatial_dims + self.dilation_rate = dilation_rate + self.data_format = data_format + self.op = build_op(num_spatial_dims, "VALID") + self.call = self._with_space_to_batch_call + + def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin + """Call functionality for with_space_to_batch.""" + # Handle input whose shape is unknown during graph creation. 
+ input_spatial_shape = None + input_shape = self.input_shape + spatial_dims = self.spatial_dims + if input_shape.ndims is not None: + input_shape_list = input_shape.as_list() + input_spatial_shape = [input_shape_list[i] for i in spatial_dims] + if input_spatial_shape is None or None in input_spatial_shape: + input_shape_tensor = array_ops.shape(inp) + input_spatial_shape = array_ops_stack.stack( + [input_shape_tensor[i] for i in spatial_dims]) + + base_paddings = self.base_paddings + if base_paddings is None: + # base_paddings could not be computed at build time since static filter + # shape was not fully defined. + filter_shape = array_ops.shape(filter) + base_paddings = _with_space_to_batch_base_paddings( + filter_shape, self.num_spatial_dims, self.rate_or_const_rate) + + paddings, crops = array_ops.required_space_to_batch_paddings( + input_shape=input_spatial_shape, + base_paddings=base_paddings, + block_shape=self.dilation_rate) + + dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1, + spatial_dims) + paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims) + crops = _with_space_to_batch_adjust(crops, 0, spatial_dims) + input_converted = array_ops.space_to_batch_nd( + input=inp, block_shape=dilation_rate, paddings=paddings) + + result = self.op(input_converted, filter) + + result_converted = array_ops.batch_to_space_nd( + input=result, block_shape=dilation_rate, crops=crops) + + # Recover channel information for output shape if channels are not last. 
+ if self.data_format is not None and self.data_format.startswith("NC"): + if not result_converted.shape.dims[1].value and filter is not None: + output_shape = result_converted.shape.as_list() + output_shape[1] = filter.shape[-1] + result_converted.set_shape(output_shape) + + return result_converted + + def __call__(self, inp, filter): # pylint: disable=redefined-builtin + return self.call(inp, filter) + + +def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims, + rate_or_const_rate): + """Helper function to compute base_paddings.""" + # Spatial dimensions of the filters and the upsampled filters in which we + # introduce (rate - 1) zeros between consecutive filter values. + filter_spatial_shape = filter_shape[:num_spatial_dims] + pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate + + # When full_padding_shape is odd, we pad more at end, following the same + # convention as conv2d. + pad_extra_start = pad_extra_shape // 2 + pad_extra_end = pad_extra_shape - pad_extra_start + base_paddings = array_ops_stack.stack( + [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)]) + return base_paddings + + +def _with_space_to_batch_adjust(orig, fill_value, spatial_dims): + """Returns an `adjusted` version of `orig` based on `spatial_dims`. + + Tensor of the same type as `orig` and with shape + `[max(spatial_dims), ...]` where: + + adjusted[spatial_dims[i] - 1, ...] = orig[i, ...] + + for 0 <= i < len(spatial_dims), and + + adjusted[j, ...] = fill_value + + for j != spatial_dims[i] - 1 for some i. + + If `orig` is a constant value, then the result will be a constant value. + + Args: + orig: Tensor of rank > max(spatial_dims). + fill_value: Numpy scalar (of same data type as `orig) specifying the fill + value for non-spatial dimensions. + spatial_dims: See with_space_to_batch. + + Returns: + `adjusted` tensor. 
+ """ + fill_dims = orig.get_shape().as_list()[1:] + dtype = orig.dtype.as_numpy_dtype + parts = [] + const_orig = tensor_util.constant_value(orig) + const_or_orig = const_orig if const_orig is not None else orig + prev_spatial_dim = 0 + i = 0 + while i < len(spatial_dims): + start_i = i + start_spatial_dim = spatial_dims[i] + if start_spatial_dim > 1: + # Fill in any gap from the previous spatial dimension (or dimension 1 if + # this is the first spatial dimension) with `fill_value`. + parts.append( + np.full( + [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims, + fill_value, + dtype=dtype)) + # Find the largest value of i such that: + # [spatial_dims[start_i], ..., spatial_dims[i]] + # == [start_spatial_dim, ..., start_spatial_dim + i - start_i], + # i.e. the end of a contiguous group of spatial dimensions. + while (i + 1 < len(spatial_dims) and + spatial_dims[i + 1] == spatial_dims[i] + 1): + i += 1 + parts.append(const_or_orig[start_i:i + 1]) + prev_spatial_dim = spatial_dims[i] + i += 1 + if const_orig is not None: + return np.concatenate(parts) + else: + return array_ops.concat(parts, 0) + + +def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate): + """Helper function for verifying strides and dilation_rate arguments. + + This is used by `convolution` and `pool`. + + Args: + num_spatial_dims: int + strides: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value + of strides is > 1, then all values of dilation_rate must be 1. + dilation_rate: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any + value of dilation_rate is > 1, then all values of strides must be 1. + + Returns: + Normalized (strides, dilation_rate) as int32 numpy arrays of shape + [num_spatial_dims]. + + Raises: + ValueError: if the parameters are invalid. + """ + if dilation_rate is None: + dilation_rate = [1] * num_spatial_dims + elif len(dilation_rate) != num_spatial_dims: + raise ValueError(f"`len(dilation_rate)` should be {num_spatial_dims}. 
" + f"Received: dilation_rate={dilation_rate} of length " + f"{len(dilation_rate)}") + dilation_rate = np.array(dilation_rate, dtype=np.int32) + if np.any(dilation_rate < 1): + raise ValueError("all values of `dilation_rate` must be positive. " + f"Received: dilation_rate={dilation_rate}") + + if strides is None: + strides = [1] * num_spatial_dims + elif len(strides) != num_spatial_dims: + raise ValueError(f"`len(strides)` should be {num_spatial_dims}. " + f"Received: strides={strides} of length {len(strides)}") + strides = np.array(strides, dtype=np.int32) + if np.any(strides < 1): + raise ValueError("all values of `strides` must be positive. " + f"Received: strides={strides}") + + if np.any(strides > 1) and np.any(dilation_rate > 1): + raise ValueError( + "`strides > 1` not supported in conjunction with `dilation_rate > 1`. " + f"Received: strides={strides} and dilation_rate={dilation_rate}") + return strides, dilation_rate + + +@tf_export(v1=["nn.convolution"]) +@dispatch.add_dispatch_support +def convolution( + input, # pylint: disable=redefined-builtin + filter, # pylint: disable=redefined-builtin + padding, + strides=None, + dilation_rate=None, + name=None, + data_format=None, + filters=None, + dilations=None): # pylint: disable=g-doc-args + """Computes sums of N-D convolutions (actually cross-correlation). + + This also supports either output striding via the optional `strides` parameter + or atrous convolution (also known as convolution with holes or dilated + convolution, based on the French word "trous" meaning holes in English) via + the optional `dilation_rate` parameter. Currently, however, output striding + is not supported for atrous convolutions. 
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None,
    filters=None,
    dilations=None):  # pylint: disable=g-doc-args
  """Computes sums of N-D convolutions (actually cross-correlation).

  Supports either output striding (via `strides`) or atrous/dilated
  convolution (via `dilation_rate`); the two cannot be combined — if any
  value of `strides` is > 1 then every value of `dilation_rate` must be 1,
  and vice versa. It is required that 1 <= N <= 3.

  When `data_format` does not start with "NC" (the default), `input` has
  shape `[batch_size] + input_spatial_shape + [in_channels]` and `filter`
  has shape `spatial_filter_shape + [in_channels, out_channels]`. Each
  output position `(x[0], ..., x[N-1])` is then

  ```
  output[b, x[0], ..., x[N-1], k] =
      sum_{z[0], ..., z[N-1], q}
          filter[z[0], ..., z[N-1], q, k] *
          padded_input[b,
                       x[0]*strides[0] + dilation_rate[0]*z[0],
                       ...,
                       x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                       q]
  ```

  where `padded_input` is the input zero-padded for an effective filter
  shape of `(spatial_filter_shape - 1) * dilation_rate + 1`. When
  `data_format` starts with "NC", the `input` and output (but not the
  `filter`) have their channel dimension second instead of last.

  Args:
    input: An (N+2)-D `Tensor` of type `T`, laid out per `data_format` as
      described above.
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. `"VALID"` means no
      padding; `"SAME"` pads evenly so that, with unit strides, the output
      keeps the input's spatial shape.
    strides: Optional sequence of N ints >= 1; the output stride. Defaults
      to `[1]*N`.
    dilation_rate: Optional sequence of N ints >= 1; the filter
      upsampling / input downsampling rate (a.k.a. "input stride" or
      "dilation"). Defaults to `[1]*N`.
    name: Optional name for the returned tensor.
    data_format: A string or None. For N=1: "NWC" (default) or "NCW"; for
      N=2: "NHWC" (default) or "NCHW"; for N=3: "NDHWC" (default) or
      "NCDHW".

  Returns:
    A `Tensor` with the same type as `input`, of shape
    `[batch_size] + output_spatial_shape + [out_channels]` (channels-last)
    or `[batch_size, out_channels] + output_spatial_shape` (channels-
    first), where for `"SAME"` padding
    `output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])`
    and for `"VALID"` padding
    `output_spatial_shape[i] = ceil((input_spatial_shape[i] -
    (spatial_filter_shape[i]-1) * dilation_rate[i]) / strides[i])`.

  Raises:
    ValueError: If input/output depth does not match `filter` shape, if
      padding is other than `"VALID"` or `"SAME"`, or if data_format is
      invalid.
  """
  # Resolve the deprecated v1 aliases before delegating.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  return convolution_internal(
      input,
      filter,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilation_rate,
      name=name)


@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  return convolution_internal(
      input,  # pylint: disable=redefined-builtin
      filters,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilations,
      name=name)


# The v2 docstring is the v1 docstring with the renamed arguments
# substituted in ("dilation_rate" -> "dilations", "filter" -> "filters").
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
def convolution_internal(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None,
    call_from_convolution=True,
    num_spatial_dims=None):
  """Internal function which performs rank agnostic convolution.

  Args:
    input: See `convolution`.
    filters: See `convolution`.
    strides: See `convolution`.
    padding: See `convolution`.
    data_format: See `convolution`.
    dilations: See `convolution`.
    name: See `convolution`.
    call_from_convolution: Whether this is being invoked from the public
      `convolution` wrapper; affects only op naming on TPU.
    num_spatial_dims: (Optional.) Integer rank of the spatial dimensions
      (1, 2 or 3). Only required to disambiguate the number of batch
      dimensions when `filters.shape.rank` is unknown and the input has
      more than one batch dimension; when both it and the filter rank are
      unknown, a single batch dimension is assumed.

  Returns:
    A tensor of shape and dtype matching that of `input`.

  Raises:
    ValueError: If input and filter both have unknown shapes, or if
      `num_spatial_dims` is provided and incompatible with the value
      estimated from `filters.shape`.
  """
  # Convert operands to tensors up front (unless they already are, or are
  # variables, which conv ops accept directly).
  if (not isinstance(filters, variables_lib.Variable) and
      not tensor_util.is_tf_type(filters)):
    with ops.name_scope("convolution_internal", None, [filters, input]):
      filters = ops.convert_to_tensor(filters, name="filters")
  if (not isinstance(input, tensor_lib.Tensor) and
      not tensor_util.is_tf_type(input)):
    with ops.name_scope("convolution_internal", None, [filters, input]):
      input = ops.convert_to_tensor(input, name="input")

  filter_rank = filters.shape.rank
  input_rank = input.shape.rank

  # Infer the number of spatial dims from whichever rank is known, or
  # validate the caller-provided value against the filter rank.
  if num_spatial_dims is None:
    if filter_rank:
      num_spatial_dims = filter_rank - 2
    elif input_rank:
      num_spatial_dims = input_rank - 2
    else:
      raise ValueError(
          "When `num_spatial_dims` is not set, one of `input.shape.rank` or "
          "`filters.shape.rank` must be known. "
          f"Received: input.shape={input.shape} of rank {input_rank} and "
          f"filters.shape={filters.shape} of rank {filter_rank}")
  elif filter_rank and filter_rank - 2 != num_spatial_dims:
    raise ValueError(
        "`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: "
        f"filters.shape={filters.shape} of rank {filter_rank} and "
        f"num_spatial_dims={num_spatial_dims}")

  if input_rank:
    num_batch_dims = input_rank - num_spatial_dims - 1  # Channel dimension.
  else:
    num_batch_dims = 1  # By default, assume a single batch dimension.

  if num_spatial_dims not in {1, 2, 3}:
    raise ValueError(
        "`num_spatial_dims` must be 1, 2, or 3. "
        f"Received: num_spatial_dims={num_spatial_dims}.")

  # Locate the channel axis so strides/dilations can be expanded correctly.
  if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
    channel_index = num_batch_dims + num_spatial_dims
  else:
    channel_index = num_batch_dims

  if dilations is None:
    dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
                              "dilations")
    is_dilated_conv = False
  else:
    dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
                              "dilations")
    is_dilated_conv = any(rate != 1 for rate in dilations)

  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
  has_tpu_context = device_context.enclosing_tpu_context() is not None

  # Pick a default scope name; on TPU (when not routed through the public
  # `convolution` wrapper) name the scope after the underlying op.
  if name:
    default_name = None
  elif not has_tpu_context or call_from_convolution:
    default_name = "convolution"
  elif num_spatial_dims == 2:  # Most common case.
    default_name = "Conv2D"
  elif num_spatial_dims == 3:
    default_name = "Conv3D"
  else:
    default_name = "conv1d"

  with ops.name_scope(name, default_name, [input, filters]) as name:
    if not is_dilated_conv or has_tpu_context:
      # Fast path for TPU or if no dilation, as gradient only supported on
      # TPU for dilations.
      if num_spatial_dims == 2:  # Most common case.
        op = _conv2d_expanded_batch
      elif num_spatial_dims == 3:
        op = _conv3d_expanded_batch
      else:
        op = conv1d
      return op(
          input,
          filters,
          strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)

    # Dilated path: strip the batch/channel entries so the space-to-batch
    # helper sees only the spatial strides and dilations.
    if channel_index == 1:
      spatial_strides = strides[2:]
      spatial_dilations = dilations[2:]
    else:
      spatial_strides = strides[1:-1]
      spatial_dilations = dilations[1:-1]

    op = Convolution(
        tensor_shape.as_shape(input.shape),
        tensor_shape.as_shape(filters.shape),
        padding,
        strides=spatial_strides,
        dilation_rate=spatial_dilations,
        name=name,
        data_format=data_format,
        num_spatial_dims=num_spatial_dims)
    return op(input, filters)
class Convolution:
  """Helper class for convolution.

  Note that this class assumes that shapes of input and filter passed to
  `__call__` are compatible with `input_shape`, `filter_shape`, and
  `num_spatial_dims` passed to the constructor.

  Arguments
    input_shape: static shape of input. i.e. input.shape. Its length is
      `batch_shape + input_spatial_shape + [num_channels]` if `data_format`
      does not start with `NC`, or
      `batch_shape + [num_channels] + input_spatial_shape` if `data_format`
      starts with `NC`.
    filter_shape: static shape of the filter. i.e. filter.shape.
    padding: The padding algorithm, must be "SAME" or "VALID".
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: A string or `None`. Specifies whether the channel
      dimension of the `input` and output is the last dimension (if
      `data_format` is `None` or does not start with `NC`), or the first
      post-batch dimension (i.e. if `data_format` starts with `NC`).
    num_spatial_dims: (Usually optional.) Python integer, the rank of the
      spatial and channel dimensions. For `1-D`, `2-D` and `3-D`
      convolutions, the value of `num_spatial_dims` is `1`, `2`, and `3`,
      respectively. This argument is only required to disambiguate the rank
      of `batch_shape` when `filter_shape.ndims is None` and
      `len(batch_shape) > 1`. For backwards compatibility, if
      `num_spatial_dims is None` and `filter_shape.ndims is None`, then
      `len(batch_shape)` is assumed to be `1` (i.e., the input is expected
      to be `[batch_size, num_channels] + input_spatial_shape` or
      `[batch_size] + input_spatial_shape + [num_channels]`.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None,
               num_spatial_dims=None):
    """Helper function for convolution."""
    # num_batch_dims stays None until it can be derived from the ranks; it
    # defaults to 1 at the end if neither rank pins it down.
    num_batch_dims = None
    filter_shape = tensor_shape.as_shape(filter_shape)
    input_shape = tensor_shape.as_shape(input_shape)

    # Derive (or validate) num_spatial_dims from the filter rank when known:
    # a conv filter always has rank num_spatial_dims + 2.
    if filter_shape.ndims is not None:
      if (num_spatial_dims is not None and
          filter_shape.ndims != num_spatial_dims + 2):
        raise ValueError(
            "`filters.shape.rank` must be `num_spatial_dims + 2`. Received: "
            f"filters.shape={filter_shape} of rank {filter_shape.rank} and "
            f"num_spatial_dims={num_spatial_dims}")
      else:
        num_spatial_dims = filter_shape.ndims - 2

    if input_shape.ndims is not None and num_spatial_dims is not None:
      num_batch_dims = input_shape.ndims - num_spatial_dims - 1

    if num_spatial_dims is None:
      # Filter rank unknown: fall back to assuming a single batch dim.
      # NOTE(review): if input_shape.ndims is also None this subtraction
      # would fail before the explicit ValueError below — presumably callers
      # never reach here with both ranks unknown; confirm against callers.
      num_spatial_dims = input_shape.ndims - 2
    else:
      if input_shape.ndims is not None:
        if input_shape.ndims < num_spatial_dims + 2:
          raise ValueError(
              "`input.shape.rank` must be >= than `num_spatial_dims + 2`. "
              f"Received: input.shape={input_shape} of rank {input_shape.rank} "
              f"and num_spatial_dims={num_spatial_dims}")
        else:
          if num_batch_dims is None:
            num_batch_dims = input_shape.ndims - num_spatial_dims - 1

    if num_spatial_dims is None:
      raise ValueError(
          "When `num_spatial_dims` is not set, one of `input.shape.rank` or "
          "`filters.shape.rank` must be known. "
          f"Received: input.shape={input_shape} of rank {input_shape.rank} and "
          f"`filters.shape={filter_shape}` of rank {filter_shape.rank}")

    if num_batch_dims is None:
      num_batch_dims = 1

    if num_batch_dims < 1:
      raise ValueError(
          f"Batch dims should be >= 1, but found {num_batch_dims}. "
          "Batch dims was estimated as "
          "`input.shape.rank - num_spatial_dims - 1` and `num_spatial_dims` "
          "was either provided or estimated as `filters.shape.rank - 2`. "
          f"Received: input.shape={input_shape} of rank {input_shape.rank}, "
          f"filters.shape={filter_shape} of rank {filter_shape.rank}, and "
          f"num_spatial_dims={num_spatial_dims}")

    # Locate the channel dimension and the spatial dimension indices
    # according to channels-last vs channels-first layout.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + num_batch_dims)
      spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_batch_dims)
      spatial_dims = range(
          num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)

    # Input channels must be a multiple of the filter's in-channel dim
    # (equality for ordinary conv; multiples cover grouped convolution).
    filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
    if not (input_channels_dim % filter_dim).is_compatible_with(0):
      raise ValueError(
          "The number of input channels is not divisible by the corresponding "
          f"number of output filters. Received: input.shape={input_shape} with "
          f"{input_channels_dim} channels and filters.shape={filter_shape} "
          f"with {filter_dim} output filters.")

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.padding = padding
    self.name = name
    self.dilation_rate = dilation_rate
    self.num_batch_dims = num_batch_dims
    self.num_spatial_dims = num_spatial_dims
    # Dilation is implemented by wrapping a non-dilated conv in
    # space-to-batch / batch-to-space transforms.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format,
        num_batch_dims=num_batch_dims)

  def _build_op(self, _, padding):
    # Factory handed to _WithSpaceToBatch: builds the inner (non-dilated)
    # convolution with the padding the wrapper decides on.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name,
        num_batch_dims=self.num_batch_dims)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # TPU convolution supports dilations greater than 1 natively, so skip
    # the space-to-batch wrapper there.
    if device_context.enclosing_tpu_context() is not None:
      return convolution_internal(
          inp,
          filter,
          strides=self.strides,
          padding=self.padding,
          data_format=self.data_format,
          dilations=self.dilation_rate,
          name=self.name,
          call_from_convolution=False,
          num_spatial_dims=self.num_spatial_dims)
    else:
      return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
@dispatch.add_dispatch_support
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate=None,
    strides=None,
    name=None,
    data_format=None,
    dilations=None):
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
  0 <= b < batch_size,
  0 <= x[i] < output_spatial_shape[i],
  0 <= c < num_channels:

  ```
  output[b, x[0], ..., x[N-1], c] =
    REDUCE_{z[0], ..., z[N-1]}
      input[b,
            x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
            ...
            x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
            c],
  ```

  where the reduction function REDUCE depends on the value of
  `pooling_type`, and pad_before is defined based on the value of `padding`
  as described in the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output
  are simply transposed as follows:

  ```python
  pool(input, data_format, **kwargs) =
    tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                      **kwargs),
                 [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format
      does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format
      starts with "NC". Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    dilation_rate: Optional. Dilation rate. List of N ints >= 1.
      Defaults to `[1]*N`. If any value of dilation_rate is > 1, then all
      values of strides must be 1.
    strides: Optional. Sequence of N ints >= 1. Defaults to `[1]*N`.
      If any value of strides is > 1, then all values of dilation_rate must
      be 1.
    name: Optional. Name of the op.
    data_format: A string or None. Specifies whether the channel dimension
      of the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC"). For N=1, the valid values are "NWC"
      (default) and "NCW". For N=2, the valid values are "NHWC" (default)
      and "NCHW". For N=3, the valid values are "NDHWC" (default) and
      "NCDHW".
    dilations: Alias for dilation_rate

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) *
              dilation_rate[i]) / strides[i]).

  Raises:
    ValueError: if arguments are invalid.

  """
  # Resolve the deprecated "dilations" alias.
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin

    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("`len(window_shape)` must be 1, 2, or 3. Received: "
                       f"window_shape={window_shape} of length "
                       f"{len(window_shape)}")

    input.get_shape().with_rank(num_spatial_dims + 2)

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with 'SAME' padding is not implemented for "
          f"`dilation_rate` > 1. Received: padding={padding} and "
          f"dilation_rate={dilation_rate}")

    if np.any(strides > window_shape):
      raise ValueError(
          "`strides` > `window_shape` not supported due to inconsistency "
          f"between CPU and GPU implementations. Received: strides={strides} "
          f"and window_shape={window_shape}")

    # Dispatch table: 1-D pooling reuses the 2-D ops via a dummy dimension.
    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError(
          f"{num_spatial_dims}-D {pooling_type} pooling is not supported.")

    # Expand window/strides with unit entries at the batch and channel
    # positions, per layout.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)

    if num_spatial_dims == 1:
      # 1-D pooling is routed through the 2-D ops with an inserted height
      # dimension of size 1.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either 'NWC' or 'NCW'. "
                         f"Received: data_format={data_format}")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)

    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      # Inner (non-dilated) pooling op passed to with_space_to_batch.
      if num_spatial_dims == 1:
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result

    # Dilation (if any) is implemented via space-to-batch around `op`.
    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
" + f"Received: data_format={data_format}") + adjusted_window_shape = [1] + adjusted_window_shape + adjusted_strides = [1] + adjusted_strides + else: + data_format_kwargs = dict(data_format=data_format) + + def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring + if num_spatial_dims == 1: + converted_input = array_ops.expand_dims(converted_input, + spatial_dims[0]) + result = pooling_ops[op_key]( + converted_input, + adjusted_window_shape, + adjusted_strides, + converted_padding, + name=scope, + **data_format_kwargs) + if num_spatial_dims == 1: + result = array_ops.squeeze(result, [spatial_dims[0]]) + return result + + return with_space_to_batch( + input=input, + dilation_rate=dilation_rate, + padding=padding, + op=op, + spatial_dims=spatial_dims, + filter_shape=window_shape) + + +@tf_export("nn.pool", v1=[]) +@dispatch.add_dispatch_support +def pool_v2( + input, # pylint: disable=redefined-builtin + window_shape, + pooling_type, + strides=None, + padding="VALID", + data_format=None, + dilations=None, + name=None): + # pylint: disable=line-too-long + """Performs an N-D pooling operation. + + In the case that `data_format` does not start with "NC", computes for + 0 <= b < batch_size, + 0 <= x[i] < output_spatial_shape[i], + 0 <= c < num_channels: + + ``` + output[b, x[0], ..., x[N-1], c] = + REDUCE_{z[0], ..., z[N-1]} + input[b, + x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], + ... + x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], + c], + ``` + + where the reduction function REDUCE depends on the value of `pooling_type`, + and pad_before is defined based on the value of `padding` as described in + the "returns" section of `tf.nn.convolution` for details. + The reduction never includes out-of-bounds positions. 
+ + In the case that `data_format` starts with `"NC"`, the `input` and output are + simply transposed as follows: + + ```python + pool(input, data_format, **kwargs) = + tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), + **kwargs), + [0, N+1] + range(1, N+1)) + ``` + + Args: + input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + + [num_channels]` if data_format does not start with "NC" (default), or + `[batch_size, num_channels] + input_spatial_shape` if data_format starts + with "NC". Pooling happens over the spatial dimensions only. + window_shape: Sequence of N ints >= 1. + pooling_type: Specifies pooling operation, must be "AVG" or "MAX". + strides: Optional. Sequence of N ints >= 1. Defaults to `[1]*N`. If any value of + strides is > 1, then all values of dilation_rate must be 1. + padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". + See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For + N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to + `[1]*N`. If any value of dilation_rate is > 1, then all values of strides + must be 1. + name: Optional. Name of the op. 
@tf_export("nn.atrous_conv2d")
@dispatch.add_dispatch_support
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  This function is a simpler wrapper around the more general
  `tf.nn.convolution`, and exists only for backwards compatibility. You can
  use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.

  Computes a 2-D atrous convolution on 4-D `value` and `filters` tensors.
  With `rate == 1` this is ordinary 2-D convolution. With `rate > 1` the
  input is sampled every `rate` pixels along `height` and `width`, which is
  equivalent to convolving with filters upsampled by inserting `rate - 1`
  zeros between consecutive filter values — hence "atrous" (from the French
  "trous", holes):

  ```
  output[batch, height, width, out_channel] =
      sum_{dheight, dwidth, in_channel} (
          filters[dheight, dwidth, in_channel, out_channel] *
          value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
      )
  ```

  Atrous convolution controls how densely feature responses are computed in
  fully convolutional networks without increasing the parameter count; with
  bilinear interpolation it is an alternative to `conv2d_transpose` for
  dense prediction (semantic segmentation, optical flow, depth estimation).
  See Chen et al., 2015 and Yu et al., 2016 for background. The
  implementation here reduces the operation to `space_to_batch` ->
  `conv2d(..., padding="VALID")` -> `batch_to_space` with `block_size=rate`.
  Note that a chain of SAME-padded `atrous_conv2d` ops sharing one `rate`
  (and odd filter sizes) can be computed more cheaply as one
  `space_to_batch`, a chain of SAME-padded `conv2d` ops, and one
  `batch_to_space`, because adjacent batch_to_space/space_to_batch pairs
  with matching paddings/crops cancel.

  Args:
    value: A 4-D `Tensor` of type `float`, in the default "NHWC" format,
      with shape `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`; its
      `in_channels` must match that of `value`. The effective filter size
      is `filter_size + (filter_size - 1) * (rate - 1)` in each spatial
      dimension.
    rate: A positive int32; the sampling stride across `height` and
      `width`, a.k.a. `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. See
      [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)
      for more information.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`. With `'VALID'` padding the
    output shape is `[batch, height - rate * (filter_width - 1),
    width - rate * (filter_height - 1), out_channels]`; with `'SAME'`
    padding it is `[batch, height, width, out_channels]`.

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or
      if padding is other than `'VALID'` or `'SAME'`.

  References:
    Multi-Scale Context Aggregation by Dilated Convolutions:
      [Yu et al., 2016](https://arxiv.org/abs/1511.07122)
    Semantic Image Segmentation with Deep Convolutional Nets and Fully
    Connected CRFs:
      [Chen et al., 2015](http://arxiv.org/abs/1412.7062)
    OverFeat - Integrated Recognition, Localization and Detection using
    Convolutional Networks:
      [Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
    Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
      [Giusti et al., 2013](https://ieeexplore.ieee.org/abstract/document/6738831)
  """
  # Delegate to the rank-agnostic convolution with the scalar rate
  # broadcast to both spatial dimensions.
  return convolution(
      input=value,
      filter=filters,
      padding=padding,
      dilation_rate=np.broadcast_to(rate, (2,)),
      name=name)
def convert_padding(padding, expected_length=4):
  """Converts Python padding to C++ padding for ops which take EXPLICIT padding.

  Args:
    padding: the `padding` argument for a Python op which supports EXPLICIT
      padding.
    expected_length: Expected number of entries in the padding list when
      explicit padding is used.

  Returns:
    (padding, explicit_paddings) pair, which should be passed as attributes to
    a C++ op.

  Raises:
    ValueError: If padding is invalid.
  """
  flattened = []
  # Callers must express explicit padding as a list; the literal string
  # "EXPLICIT" is reserved for the C++ attribute and rejected here.
  if padding == "EXPLICIT":
    raise ValueError("'EXPLICIT' is not a valid value for `padding`. To use "
                     "explicit padding, `padding` must be a list.")
  if not isinstance(padding, (list, tuple)):
    # "SAME"/"VALID" (or any non-list value) passes through untouched with an
    # empty explicit_paddings attribute.
    return padding, flattened
  for index, pair in enumerate(padding):
    if not isinstance(pair, (list, tuple)):
      raise ValueError("When `padding` is a list, each element of `padding` "
                       "must be a list/tuple of size 2. Received: "
                       f"padding={padding} with element at index {index} of type "
                       f"{type(pair)}")
    if len(pair) != 2:
      raise ValueError("When `padding` is a list, each element of `padding` "
                       "must be a list/tuple of size 2. Received: "
                       f"padding={padding} with element at index {index} of size "
                       f"{len(pair)}")
    # Flatten [[a, b], [c, d], ...] into [a, b, c, d, ...] for the attribute.
    flattened.extend(pair)
  if len(padding) != expected_length:
    raise ValueError(
        f"When padding is a list, it must be of size {expected_length}. "
        f"Received: padding={padding} of size {len(padding)}")
  return "EXPLICIT", flattened
@tf_export(v1=["nn.conv1d"])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(
    value=None,
    filters=None,
    stride=None,
    padding=None,
    use_cudnn_on_gpu=None,
    data_format=None,
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    dilations=None):
  r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter.

  Given an input tensor of shape
    `batch_shape + [in_width, in_channels]`
  if `data_format` is `"NWC"`, or
    `batch_shape + [in_channels, in_width]`
  if `data_format` is `"NCW"`,
  and a filter / kernel tensor of shape
  `[filter_width, in_channels, out_channels]`, this op reshapes
  the arguments to pass them to `conv2d` to perform the equivalent
  convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    `batch_shape + [in_width, in_channels]`
  is reshaped to
    `batch_shape + [1, in_width, in_channels]`,
  and the filter is reshaped to
    `[1, filter_width, in_channels, out_channels]`.
  The result is then reshaped back to
    `batch_shape + [out_width, out_channels]`
  \(where out_width is a function of the stride and padding as in conv2d\) and
  returned to the caller.

  Args:
    value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
      `float64`.
    filters: A Tensor of rank at least 3. Must have the same type as `value`.
    stride: An int or list of `ints` that has length `1` or `3`. The number of
      entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
      the data is stored in the order of `batch_shape + [in_width,
      in_channels]`. The `"NCW"` format stores data as `batch_shape +
      [in_channels, in_width]`.
    name: A name for the operation (optional).
    input: Alias for value.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Reshape the input tensor to batch_shape + [1, in_width, in_channels]
    # Map the 1-D layouts onto their 2-D counterparts by inserting a dummy
    # height dimension.  Negative axes are used so inputs with extra leading
    # batch dimensions are handled correctly.
    if data_format is None or data_format == "NHWC" or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = -3
      channel_index = 2
    elif data_format == "NCHW" or data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = -2
      channel_index = 1
    else:
      raise ValueError("`data_format` must be 'NWC' or 'NCW'. "
                       f"Received: data_format={data_format}")
    # Prepend a unit stride/dilation for the inserted dummy height dimension.
    strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    # filters becomes [1, filter_width, in_channels, out_channels].
    value = array_ops.expand_dims(value, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)
    if value.shape.ndims in (4, 3, 2, 1, 0, None):
      # Fast path: rank <= 4 (after expansion) or unknown rank needs no
      # batch-dimension flattening, so call the conv2d kernel directly and
      # avoid the extra Python overhead of squeeze_batch_dims in eager mode.
      result = gen_nn_ops.conv2d(
          value,
          filters,
          strides,
          padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format,
          dilations=dilations,
          name=name)
    else:
      # Higher-rank input: temporarily fold the leading batch dimensions into
      # one so the 4-D conv2d kernel can be applied, then restore them.
      result = squeeze_batch_dims(
          value,
          functools.partial(
              gen_nn_ops.conv2d,
              filter=filters,
              strides=strides,
              padding=padding,
              use_cudnn_on_gpu=use_cudnn_on_gpu,
              data_format=data_format,
              dilations=dilations,
          ),
          inner_rank=3,
          name=name)
    # Drop the dummy height dimension that was inserted above.
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
@dispatch.add_dispatch_support
def conv1d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    stride,
    padding,
    data_format="NWC",
    dilations=None,
    name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape `batch_shape + [in_width, in_channels]`
  (`data_format="NWC"`) or `batch_shape + [in_channels, in_width]`
  (`data_format="NCW"`) and a filter / kernel tensor of shape
  `[filter_width, in_channels, out_channels]`, this op reshapes the arguments
  and delegates to `tf.nn.conv2d`: when `data_format` does not start with
  `"NC"`, the input is viewed as `batch_shape + [1, in_width, in_channels]`
  and the filter as `[1, filter_width, in_channels, out_channels]`; the
  result is reshaped back to `batch_shape + [out_width, out_channels]`
  \(where out_width is a function of the stride and padding as in conv2d\).

  Args:
    input: A Tensor of rank at least 3. Must be of type `float16`, `float32`,
      or `float64`.
    filters: A Tensor of rank at least 3. Must have the same type as `input`.
    stride: An int or list of `ints` that has length `1` or `3`. The number of
      entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'. See
      [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)
      for more information.
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to
      `"NWC"`, the data is stored in the order of
      `batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data
      as `batch_shape + [in_channels, in_width]`.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set
      to k > 1, there will be k-1 skipped cells between each filter element on
      that dimension. Dilations in the batch and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Pure delegation: the v1 implementation holds all of the reshaping logic.
  # cuDNN is always enabled in the v2 API.
  return conv1d(
      input,
      filters,
      stride,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      name=name,
      dilations=dilations)
@tf_export("nn.conv1d_transpose")
@dispatch.add_dispatch_support
def conv1d_transpose(
    input,  # pylint: disable=redefined-builtin
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NWC",
    dilations=None,
    name=None):
  """The transpose of `conv1d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
  rather than an actual deconvolution.

  Args:
    input: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filters: A 3-D `Tensor` with the same type as `input` and shape
      `[filter_width, output_channels, in_channels]`. `filter`'s
      `in_channels` dimension must match that of `input`.
    output_shape: A 1-D `Tensor`, containing three elements, representing the
      output shape of the deconvolution op.
    strides: An int or list of `ints` that has length `1` or `3`. The number of
      entries by which the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)
      for more information.
    data_format: A string. `'NWC'` and `'NCW'` are supported.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `input`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, if
      `output_shape` is not at 3-element vector, if `padding` is other than
      `'VALID'` or `'SAME'`, or if `data_format` is invalid.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [input, filters, output_shape]) as name:
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      channel_index = 2
    elif data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      channel_index = 1
    else:
      raise ValueError("`data_format` must be 'NWC' or 'NCW'. "
                       f"Received: data_format={data_format}")

    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    # (NWC case): a dummy height dimension of size 1 is inserted so the 2-D
    # machinery can be reused; the stride/dilation lists get a matching 1.
    strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    input = array_ops.expand_dims(input, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)
    # Splice a 1 into output_shape at the dummy spatial position as well.
    # A Python list is sliced directly; a Tensor output_shape goes through
    # array_ops.concat unchanged.
    output_shape = list(output_shape) if not isinstance(
        output_shape, tensor_lib.Tensor) else output_shape
    output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
                                     output_shape[spatial_start_dim:]], 0)

    # The transpose of conv2d is computed as the gradient of conv2d w.r.t.
    # its input, so the 4-D `input` here plays the role of `out_backprop`.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
    # Drop the dummy height dimension to return a 3-D result.
    return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
@dispatch.add_dispatch_support
def conv2d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NHWC",
              dilations=None,
              name=None):
  # pylint: disable=line-too-long
  r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors.

  The `input` tensor may have rank `4` or higher, where shape dimensions
  `[:-3]` are considered batch dimensions (`batch_shape`).

  Given an input tensor of shape
  `batch_shape + [in_height, in_width, in_channels]` and a filter / kernel
  tensor of shape `[filter_height, filter_width, in_channels, out_channels]`,
  this op:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
                          * filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`. For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A Tensor of rank at least 4. The dimension order is interpreted according
      to the value of `data_format`; with the all-but-inner-3 dimensions acting
      as batch dimensions. See below for details.
    filters: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. See
      [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)
      for more information. When explicit padding is used and data_format is
      `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`. With the default format "NHWC", the data is stored
      in the order of `batch_shape + [height, width, channels]`; "NCHW" stores
      data as `batch_shape + [channels, height, width]`.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      Dilations in the batch and depth dimensions of a 4-d tensor must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input` and the same outer batch shape.
  """
  # pylint: enable=line-too-long
  # Pure delegation to the v1 entry point; cuDNN is always enabled in v2.
  return conv2d(input,  # pylint: disable=redefined-builtin
                filters,
                strides,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                dilations=dilations,
                name=name)
@tf_export(v1=["nn.conv2d"])
@dispatch.add_dispatch_support
def conv2d(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
                          * filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. Dilations in the batch and depth dimensions if a 4-d
      tensor must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # `filters` is the preferred alias; fall back to the legacy `filter` name.
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Normalize "SAME"/"VALID"/explicit-list padding into the (string, flat
  # list) attribute pair expected by the C++ kernel.
  padding, explicit_paddings = convert_padding(padding)
  if data_format is None:
    data_format = "NHWC"
  channel_index = 1 if data_format.startswith("NC") else 3

  # Expand scalar / length-2 strides and dilations to full length-4 lists
  # with 1s in the N and C positions.
  strides = _get_sequence(strides, 2, channel_index, "strides")
  dilations = _get_sequence(dilations, 2, channel_index, "dilations")

  shape = input.shape
  # shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
  # we fall back to len(shape).
  ndims = getattr(shape, "ndims", -1)
  if ndims == -1:
    ndims = len(shape)
  if ndims in (4, 3, 2, 1, 0, None):
    # We avoid calling squeeze_batch_dims to reduce extra python function
    # call slowdown in eager mode. This branch doesn't require reshapes.
    return gen_nn_ops.conv2d(
        input,
        filter=filter,
        strides=strides,
        padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        explicit_paddings=explicit_paddings,
        data_format=data_format,
        dilations=dilations,
        name=name)
  # Rank > 4: fold the extra leading batch dimensions into one, run the 4-D
  # kernel, and restore the original batch shape afterwards.
  return squeeze_batch_dims(
      input,
      functools.partial(
          gen_nn_ops.conv2d,
          filter=filter,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          explicit_paddings=explicit_paddings,
          data_format=data_format,
          dilations=dilations),
      inner_rank=3,
      name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
@dispatch.add_dispatch_support
def conv2d_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`. An integer vector representing
      the tensor shape of `filter`, where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution. Must be in the same order as
      the dimension specified with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With "NHWC" the data is stored as
      [batch, in_height, in_width, in_channels]; "NCHW" stores it as
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the
      batch and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Translate the Python-level padding spec into the attribute pair the
  # generated op expects, then delegate straight to the kernel.
  padding, explicit_paddings = convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(
      input,
      filter_sizes,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu,
      explicit_paddings,
      data_format,
      dilations,
      name)
@tf_export(v1=["nn.conv2d_backprop_input"])
@dispatch.add_dispatch_support
def conv2d_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter=None,
    out_backprop=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`. An integer vector representing
      the shape of `input`, where `input` is a 4-D
      `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution. Must be in the same order as
      the dimension specified with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With "NHWC" the data is stored as
      [batch, in_height, in_width, in_channels]; "NCHW" stores it as
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the
      batch and depth dimensions must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # `filters` is the preferred alias; fall back to the legacy `filter` name.
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Translate the Python-level padding spec into the attribute pair the
  # generated op expects, then delegate straight to the kernel.
  padding, explicit_paddings = convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(
      input_sizes,
      filter,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu,
      explicit_paddings,
      data_format,
      dilations,
      name)
@tf_export(v1=["nn.conv2d_transpose"])
@dispatch.add_dispatch_support
def conv2d_transpose(
    value=None,
    filter=None,  # pylint: disable=redefined-builtin
    output_shape=None,
    strides=None,
    padding="SAME",
    data_format="NHWC",
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    filters=None,
    dilations=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
  rather than an actual deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]` for `NHWC` data format or
      `[batch, in_channels, height, width]` for `NCHW` data format.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`. `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 0. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.
    input: Alias for value.
    filters: Alias for filter.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. Dilations in the batch and depth dimensions of a 4-d
      tensor must be 1.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # Resolve the v1 alias pairs (input/value, filters/filter) first, then hand
  # everything to the v2 implementation inside a v1-style name scope.
  value = deprecated_argument_lookup("input", input, "value", value)
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  with ops.name_scope(name, "conv2d_transpose",
                      [value, filter, output_shape]) as name:
    return conv2d_transpose_v2(
        value,
        filter,
        output_shape,
        strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
+ padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of + padding algorithm to use, or a list indicating the explicit paddings at + the start and end of each dimension. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. When explicit padding is used and data_format is + `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. + data_format: A string. 'NHWC' and 'NCHW' are supported. + dilations: An int or list of `ints` that has length `1`, `2` or `4`, + defaults to 1. The dilation factor for each dimension of`input`. If a + single value is given it is replicated in the `H` and `W` dimension. By + default the `N` and `C` dimensions are set to 1. If set to k > 1, there + will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of `data_format`, see above + for details. Dilations in the batch and depth dimensions if a 4-d tensor + must be 1. + name: Optional name for the returned tensor. + + Returns: + A `Tensor` with the same type as `input`. + + Raises: + ValueError: If input/output depth does not match `filter`'s shape, or if + padding is other than `'VALID'` or `'SAME'`. 
+ + References: + Deconvolutional Networks: + [Zeiler et al., 2010] + (https://ieeexplore.ieee.org/abstract/document/5539957) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) + """ + with ops.name_scope(name, "conv2d_transpose", + [input, filter, output_shape]) as name: + if data_format is None: + data_format = "NHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + strides = _get_sequence(strides, 2, channel_index, "strides") + dilations = _get_sequence(dilations, 2, channel_index, "dilations") + padding, explicit_paddings = convert_padding(padding) + + return gen_nn_ops.conv2d_backprop_input( + input_sizes=output_shape, + filter=filters, + out_backprop=input, + strides=strides, + padding=padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + dilations=dilations, + name=name) + + +def _conv2d_expanded_batch( + input, # pylint: disable=redefined-builtin + filters, + strides, + padding, + data_format, + dilations, + name): + """Helper function for `convolution_internal`; handles expanded batches.""" + # Try really hard to avoid modifying the legacy name scopes - return early. + input_rank = input.shape.rank + if input_rank is None or input_rank < 5: + # We avoid calling squeeze_batch_dims to reduce extra python function + # call slowdown in eager mode. This branch doesn't require reshapes. + return gen_nn_ops.conv2d( + input, + filter=filters, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations, + name=name) + return squeeze_batch_dims( + input, + functools.partial( + gen_nn_ops.conv2d, + filter=filters, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations), + inner_rank=3, + name=name) + + +@tf_export("nn.atrous_conv2d_transpose") +@dispatch.add_dispatch_support +def atrous_conv2d_transpose(value, + filters, + output_shape, + rate, + padding, + name=None): + """The transpose of `atrous_conv2d`. 
+ + This operation is sometimes called "deconvolution" after + (Zeiler et al., 2010), but is really the transpose (gradient) of + `atrous_conv2d` rather than an actual deconvolution. + + Args: + value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC` + format. Its shape is `[batch, in_height, in_width, in_channels]`. + filters: A 4-D `Tensor` with the same type as `value` and shape + `[filter_height, filter_width, out_channels, in_channels]`. `filters`' + `in_channels` dimension must match that of `value`. Atrous convolution is + equivalent to standard convolution with upsampled filters with effective + height `filter_height + (filter_height - 1) * (rate - 1)` and effective + width `filter_width + (filter_width - 1) * (rate - 1)`, produced by + inserting `rate - 1` zeros along consecutive elements across the + `filters`' spatial dimensions. + output_shape: A 1-D `Tensor` of shape representing the output shape of the + deconvolution op, of form `[batch, out_height, out_width, out_channels]`. + rate: A positive int32. The stride with which we sample input values across + the `height` and `width` dimensions. Equivalently, the rate by which we + upsample the filter values by inserting zeros across the `height` and + `width` dimensions. In the literature, the same parameter is sometimes + called `input stride` or `dilation`. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + name: Optional name for the returned tensor. + + Returns: + A `Tensor` with the same type as `value`. + + Raises: + ValueError: If input/output depth does not match `filters`' shape, or if + padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less + than one, or if the output_shape is not a tensor with 4 elements. 
+ + References: + Deconvolutional Networks: + [Zeiler et al., 2010] + (https://ieeexplore.ieee.org/abstract/document/5539957) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) + """ + with ops.name_scope(name, "atrous_conv2d_transpose", + [value, filters, output_shape]) as name: + value = ops.convert_to_tensor(value, name="value") + filters = ops.convert_to_tensor(filters, name="filters") + if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]): + raise ValueError( + "`value` channel count must be compatible with `filters` input " + f"channel count. Received: value.shape={value.get_shape()} with " + f"channel count {value.get_shape()[3]} and " + f"filters.shape={filters.get_shape()} with input channel count " + f"{filters.get_shape()[3]}.") + if rate < 1: + raise ValueError(f"`rate` cannot be less than one. Received: rate={rate}") + + if rate == 1: + return conv2d_transpose( + value, + filters, + output_shape, + strides=[1, 1, 1, 1], + padding=padding, + data_format="NHWC") + + output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape") + if not output_shape_.get_shape().is_compatible_with( + tensor_shape.TensorShape([4])): + raise ValueError("`output_shape` must have shape (4,). " + f"Received: output_shape={output_shape_.get_shape()}") + + if isinstance(output_shape, tuple): + output_shape = list(output_shape) + + if isinstance(output_shape, (list, np.ndarray)): + # output_shape's shape should be == [4] if reached this point. + if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]): + raise ValueError( + "`output_shape` channel count must be compatible with `filters` " + f"output channel count. Received: output_shape={output_shape} with " + f"channel count {output_shape[3]} and " + f"filters.shape={filters.get_shape()} with output channel count " + f"{filters.get_shape()[3]}.") + + # We have two padding contributions. 
The first is used for converting "SAME" + # to "VALID". The second is required so that the height and width of the + # zero-padded value tensor are multiples of rate. + + # Padding required to reduce to "VALID" convolution + if padding == "SAME": + # Handle filters whose shape is unknown during graph creation. + if filters.get_shape().is_fully_defined(): + filter_shape = filters.get_shape().as_list() + else: + filter_shape = array_ops.shape(filters) + filter_height, filter_width = filter_shape[0], filter_shape[1] + + # Spatial dimensions of the filters and the upsampled filters in which we + # introduce (rate - 1) zeros between consecutive filter values. + filter_height_up = filter_height + (filter_height - 1) * (rate - 1) + filter_width_up = filter_width + (filter_width - 1) * (rate - 1) + + pad_height = filter_height_up - 1 + pad_width = filter_width_up - 1 + + # When pad_height (pad_width) is odd, we pad more to bottom (right), + # following the same convention as conv2d(). + pad_top = pad_height // 2 + pad_bottom = pad_height - pad_top + pad_left = pad_width // 2 + pad_right = pad_width - pad_left + elif padding == "VALID": + pad_top = 0 + pad_bottom = 0 + pad_left = 0 + pad_right = 0 + else: + raise ValueError("`padding` must be either 'VALID' or 'SAME'. " + f"Received: padding={padding}") + + in_height = output_shape[1] + pad_top + pad_bottom + in_width = output_shape[2] + pad_left + pad_right + + # More padding so that rate divides the height and width of the input. + pad_bottom_extra = (rate - in_height % rate) % rate + pad_right_extra = (rate - in_width % rate) % rate + + # The paddings argument to space_to_batch is just the extra padding + # component. 
+ space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]] + + value = array_ops.space_to_batch( + input=value, paddings=space_to_batch_pad, block_size=rate) + + input_sizes = [ + rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate, + (in_width + pad_right_extra) // rate, output_shape[3] + ] + + value = gen_nn_ops.conv2d_backprop_input( + input_sizes=input_sizes, + filter=filters, + out_backprop=value, + strides=[1, 1, 1, 1], + padding="VALID", + data_format="NHWC") + + # The crops argument to batch_to_space includes both padding components. + batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra], + [pad_left, pad_right + pad_right_extra]] + + return array_ops.batch_to_space( + input=value, crops=batch_to_space_crop, block_size=rate) + + +@tf_export(v1=["nn.depthwise_conv2d_native"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native") +def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value + input, + filter, + strides, + padding, + data_format="NHWC", + dilations=[1, 1, 1, 1], + name=None): + r"""Computes a 2-D depthwise convolution. + + Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + and a filter / kernel tensor of shape + `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + a different filter to each input channel (expanding from 1 channel to + `channel_multiplier` channels for each), then concatenates the results + together. Thus, the output has `in_channels * channel_multiplier` channels. + + ``` + for k in 0..in_channels-1 + for q in 0..channel_multiplier-1 + output[b, i, j, k * channel_multiplier + q] = + sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * + filter[di, dj, k, q] + ``` + + Must have `strides[0] = strides[3] = 1`. 
For the most common case of the same + horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + + Args: + input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, + `float32`, `float64`. + filter: A `Tensor`. Must have the same type as `input`. + strides: A list of `ints`. 1-D of length 4. The stride of the sliding + window for each dimension of `input`. + padding: Controls how to pad the image before applying the convolution. Can + be the string `"SAME"` or `"VALID"` indicating the type of padding + algorithm to use, or a list indicating the explicit paddings at the start + and end of each dimension. When explicit padding is used and data_format + is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. + data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to + `"NHWC"`. Specify the data format of the input and output data. With the + default format "NHWC", the data is stored in the order of: [batch, height, + width, channels]. + Alternatively, the format could be "NCHW", the data storage order of: + [batch, channels, height, width]. + dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D + tensor of length 4. The dilation factor for each dimension of `input`. If + set to k > 1, there will be k-1 skipped cells between each filter element + on that dimension. The dimension order is determined by the value of + `data_format`, see above for details. Dilations in the batch and depth + dimensions must be 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + padding, explicit_paddings = convert_padding(padding) + return gen_nn_ops.depthwise_conv2d_native( + input, + filter, + strides, + padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + dilations=dilations, + name=name) + + +@tf_export( + "nn.depthwise_conv2d_backprop_input", + v1=[ + "nn.depthwise_conv2d_native_backprop_input", + "nn.depthwise_conv2d_backprop_input" + ]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input") +def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value + input_sizes, + filter, + out_backprop, + strides, + padding, + data_format="NHWC", + dilations=[1, 1, 1, 1], + name=None): + r"""Computes the gradients of depthwise convolution with respect to the input. + + Args: + input_sizes: A `Tensor` of type `int32`. An integer vector representing the + shape of `input`, based on `data_format`. For example, if `data_format` + is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor. + filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, + `float32`, `float64`. 4-D with shape `[filter_height, filter_width, + in_channels, depthwise_multiplier]`. + out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with + shape based on `data_format`. For example, if `data_format` is 'NHWC' + then out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution. + strides: A list of `ints`. The stride of the sliding window for each + dimension of the input of the convolution. + padding: Controls how to pad the image before applying the convolution. Can + be the string `"SAME"` or `"VALID"` indicating the type of padding + algorithm to use, or a list indicating the explicit paddings at the start + and end of each dimension. 
See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. When explicit padding is used and data_format is + `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. + data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to + `"NHWC"`. Specify the data format of the input and output data. With the + default format "NHWC", the data is stored in the order of: [batch, height, + width, channels]. + Alternatively, the format could be "NCHW", the data storage order of: + [batch, channels, height, width]. + dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D + tensor of length 4. The dilation factor for each dimension of `input`. If + set to k > 1, there will be k-1 skipped cells between each filter element + on that dimension. The dimension order is determined by the value of + `data_format`, see above for details. Dilations in the batch and depth + dimensions must be 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `filter`. 
+ """ + padding, explicit_paddings = convert_padding(padding) + return gen_nn_ops.depthwise_conv2d_native_backprop_input( + input_sizes, + filter, + out_backprop, + strides, + padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + dilations=dilations, + name=name) + + +@tf_export( + "nn.depthwise_conv2d_backprop_filter", + v1=[ + "nn.depthwise_conv2d_native_backprop_filter", + "nn.depthwise_conv2d_backprop_filter" + ]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter") +def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value + input, + filter_sizes, + out_backprop, + strides, + padding, + data_format="NHWC", + dilations=[1, 1, 1, 1], + name=None): + r"""Computes the gradients of depthwise convolution with respect to the filter. + + Args: + input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, + `float32`, `float64`. 4-D with shape based on `data_format`. For example, + if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, + in_width, in_channels]` tensor. + filter_sizes: A `Tensor` of type `int32`. An integer vector representing the + tensor shape of `filter`, where `filter` is a 4-D `[filter_height, + filter_width, in_channels, depthwise_multiplier]` tensor. + out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape + based on `data_format`. For example, if `data_format` is 'NHWC' then + out_backprop shape is `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution. + strides: A list of `ints`. The stride of the sliding window for each + dimension of the input of the convolution. + padding: Controls how to pad the image before applying the convolution. Can + be the string `"SAME"` or `"VALID"` indicating the type of padding + algorithm to use, or a list indicating the explicit paddings at the start + and end of each dimension. 
See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. When explicit padding is used and data_format is + `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. + data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to + `"NHWC"`. Specify the data format of the input and output data. With the + default format "NHWC", the data is stored in the order of: [batch, height, + width, channels]. + Alternatively, the format could be "NCHW", the data storage order of: + [batch, channels, height, width]. + dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D + tensor of length 4. The dilation factor for each dimension of `input`. If + set to k > 1, there will be k-1 skipped cells between each filter element + on that dimension. The dimension order is determined by the value of + `data_format`, see above for details. Dilations in the batch and depth + dimensions must be 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + padding, explicit_paddings = convert_padding(padding) + return gen_nn_ops.depthwise_conv2d_native_backprop_filter( + input, + filter_sizes, + out_backprop, + strides, + padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + dilations=dilations, + name=name) + + +def _conv3d_expanded_batch( + input, # pylint: disable=redefined-builtin + filter, # pylint: disable=redefined-builtin + strides, + padding, + data_format, + dilations=None, + name=None): + """Helper function for `conv3d`; handles expanded batches.""" + shape = input.shape + # shape object may lack ndims, e.g., if input is an np.ndarray. In that case, + # we fall back to len(shape). 
+ ndims = getattr(shape, "ndims", -1) + if ndims == -1: + ndims = len(shape) + if ndims in (5, 4, 3, 2, 1, 0, None): + # We avoid calling squeeze_batch_dims to reduce extra python function + # call slowdown in eager mode. This branch doesn't require reshapes. + return gen_nn_ops.conv3d( + input, + filter, + strides, + padding, + data_format=data_format, + dilations=dilations, + name=name) + else: + return squeeze_batch_dims( + input, + functools.partial( + gen_nn_ops.conv3d, + filter=filter, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations), + inner_rank=4, + name=name) + + +@tf_export("nn.conv3d", v1=[]) +@dispatch.add_dispatch_support +def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring + filters, + strides, + padding, + data_format="NDHWC", + dilations=None, + name=None): + if dilations is None: + dilations = [1, 1, 1, 1, 1] + return _conv3d_expanded_batch(input, filters, strides, padding, data_format, + dilations, name) + + +@tf_export(v1=["nn.conv3d"]) +@dispatch.add_dispatch_support +def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value + input, # pylint: disable=redefined-builtin + filter=None, # pylint: disable=redefined-builtin + strides=None, + padding=None, + data_format="NDHWC", + dilations=[1, 1, 1, 1, 1], + name=None, + filters=None): + filter = deprecated_argument_lookup("filters", filters, "filter", filter) + return gen_nn_ops.conv3d( + input, filter, strides, padding, data_format, dilations, name) + + +conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring( + gen_nn_ops.conv3d.__doc__, "filter", "filters") +conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__ + + +@tf_export(v1=["nn.conv3d_transpose"]) +@dispatch.add_dispatch_support +def conv3d_transpose( + value, + filter=None, # pylint: disable=redefined-builtin + output_shape=None, + strides=None, + padding="SAME", + data_format="NDHWC", + name=None, + input=None, # pylint: disable=redefined-builtin + filters=None, + 
dilations=None): + """The transpose of `conv3d`. + + This operation is sometimes called "deconvolution" after + (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` + rather than an actual deconvolution. + + Args: + value: A 5-D `Tensor` of type `float` and shape + `[batch, depth, height, width, in_channels]`. + filter: A 5-D `Tensor` with the same type as `value` and shape + `[depth, height, width, output_channels, in_channels]`. `filter`'s + `in_channels` dimension must match that of `value`. + output_shape: A 1-D `Tensor` representing the output shape of the + deconvolution op. + strides: A list of ints. The stride of the sliding window for each + dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. + See the "returns" section of `tf.nn.convolution` for details. + data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout + of the input and output tensors. Defaults to `'NDHWC'`. + name: Optional name for the returned tensor. + input: Alias of value. + filters: Alias of filter. + dilations: An int or list of `ints` that has length `1`, `3` or `5`, + defaults to 1. The dilation factor for each dimension of`input`. If a + single value is given it is replicated in the `D`, `H` and `W` dimension. + By default the `N` and `C` dimensions are set to 1. If set to k > 1, there + will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of `data_format`, see above + for details. Dilations in the batch and depth dimensions if a 5-d tensor + must be 1. + + Returns: + A `Tensor` with the same type as `value`. + + Raises: + ValueError: If input/output depth does not match `filter`'s shape, or if + padding is other than `'VALID'` or `'SAME'`. 
+ + References: + Deconvolutional Networks: + [Zeiler et al., 2010] + (https://ieeexplore.ieee.org/abstract/document/5539957) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) + """ + filter = deprecated_argument_lookup("filters", filters, "filter", filter) + value = deprecated_argument_lookup("input", input, "value", value) + return conv3d_transpose_v2( + value, + filter, + output_shape, + strides, + padding=padding, + data_format=data_format, + dilations=dilations, + name=name) + + +@tf_export("nn.conv3d_transpose", v1=[]) +@dispatch.add_dispatch_support +def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin + filters, + output_shape, + strides, + padding="SAME", + data_format="NDHWC", + dilations=None, + name=None): + """The transpose of `conv3d`. + + This operation is sometimes called "deconvolution" after + (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` + rather than an actual deconvolution. + + Args: + input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, + width, in_channels]` for `NDHWC` data format or `[batch, in_channels, + depth, height, width]` for `NCDHW` data format. + filters: A 5-D `Tensor` with the same type as `input` and shape `[depth, + height, width, output_channels, in_channels]`. `filter`'s `in_channels` + dimension must match that of `input`. + output_shape: A 1-D `Tensor` representing the output shape of the + deconvolution op. + strides: An int or list of `ints` that has length `1`, `3` or `5`. The + stride of the sliding window for each dimension of `input`. If a single + value is given it is replicated in the `D`, `H` and `W` dimension. By + default the `N` and `C` dimensions are set to 0. The dimension order is + determined by the value of `data_format`, see below for details. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. 
See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string. 'NDHWC' and 'NCDHW' are supported. + dilations: An int or list of `ints` that has length `1`, `3` or `5`, + defaults to 1. The dilation factor for each dimension of`input`. If a + single value is given it is replicated in the `D`, `H` and `W` dimension. + By default the `N` and `C` dimensions are set to 1. If set to k > 1, there + will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of `data_format`, see above + for details. Dilations in the batch and depth dimensions if a 5-d tensor + must be 1. + name: Optional name for the returned tensor. + + Returns: + A `Tensor` with the same type as `input`. + + References: + Deconvolutional Networks: + [Zeiler et al., 2010] + (https://ieeexplore.ieee.org/abstract/document/5539957) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) + """ + with ops.name_scope(name, "conv3d_transpose", + [input, filter, output_shape]) as name: + if data_format is None: + data_format = "NDHWC" + channel_index = 1 if data_format.startswith("NC") else 4 + + strides = _get_sequence(strides, 3, channel_index, "strides") + dilations = _get_sequence(dilations, 3, channel_index, "dilations") + + return gen_nn_ops.conv3d_backprop_input_v2( + input_sizes=output_shape, + filter=filters, + out_backprop=input, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations, + name=name) + + +CONV_TRANSPOSE_OPS = ( + conv1d_transpose, + conv2d_transpose_v2, + conv3d_transpose_v2, +) + + +@tf_export("nn.conv_transpose") +@dispatch.add_dispatch_support +def conv_transpose(input, # pylint: disable=redefined-builtin + filters, + output_shape, + strides, + padding="SAME", + data_format=None, + dilations=None, + name=None): + """The transpose of `convolution`. 
+ + This operation is sometimes called "deconvolution" after + (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` + rather than an actual deconvolution. + + Args: + input: An N+2 dimensional `Tensor` of shape + `[batch_size] + input_spatial_shape + [in_channels]` if data_format does + not start with "NC" (default), or + `[batch_size, in_channels] + input_spatial_shape` if data_format starts + with "NC". It must be one of the following types: + `half`, `bfloat16`, `float32`, `float64`. + filters: An N+2 dimensional `Tensor` with the same type as `input` and + shape `spatial_filter_shape + [in_channels, out_channels]`. + output_shape: A 1-D `Tensor` representing the output shape of the + deconvolution op. + strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The + stride of the sliding window for each dimension of `input`. If a single + value is given it is replicated in the spatial dimensions. By default + the `N` and `C` dimensions are set to 0. The dimension order is determined + by the value of `data_format`, see below for details. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations: An int or list of `ints` that has length `1`, `N` or `N+2`, + defaults to 1. The dilation factor for each dimension of`input`. If a + single value is given it is replicated in the spatial dimensions. By + default the `N` and `C` dimensions are set to 1. 
If set to k > 1, there + will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of `data_format`, see above + for details. + name: A name for the operation (optional). If not specified "conv_transpose" + is used. + + Returns: + A `Tensor` with the same type as `value`. + + References: + Deconvolutional Networks: + [Zeiler et al., 2010] + (https://ieeexplore.ieee.org/abstract/document/5539957) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) + """ + with ops.name_scope(name, "conv_transpose", + [input, filter, output_shape]) as name: + if tensor_util.is_tf_type(output_shape): + n = output_shape.shape[0] - 2 + elif isinstance(output_shape, collections_abc.Sized): + n = len(output_shape) - 2 + else: + raise ValueError("`output_shape` must be a tensor or sized collection. " + f"Received: output_shape={output_shape}") + + if not 1 <= n <= 3: + raise ValueError( + f"`output_shape` must be of length 3, 4 or 5. " + f"Received: output_shape={output_shape} of length {n + 2}.") + + op = CONV_TRANSPOSE_OPS[n-1] + return op( + input, + filters, + output_shape, + strides, + padding=padding, + data_format=data_format, + dilations=dilations, + name=name) + + +@tf_export("nn.bias_add") +@dispatch.add_dispatch_support +def bias_add(value, bias, data_format=None, name=None): + """Adds `bias` to `value`. + + This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. + Broadcasting is supported, so `value` may have any number of dimensions. + Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the + case where both types are quantized. + + Args: + value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, + `int16`, `int8`, `complex64`, or `complex128`. + bias: A 1-D `Tensor` with size matching the channel dimension of `value`. 
+ Must be the same type as `value` unless `value` is a quantized type, + in which case a different quantized type may be used. + data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the + default) is specified then 'N..C' is assumed. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same type as `value`. + + Raises: + ValueError if data format is unrecognized, if `value` has less than two + dimensions when `data_format` is 'N..C'/`None` or `value` has less + then three dimensions when `data_format` is `NC..`, if `bias` does not + have exactly one dimension (is a vector), or if the size of `bias` + does not match the size of the channel dimension of `value`. + """ + with ops.name_scope(name, "BiasAdd", [value, bias]) as name: + if data_format is not None: + if data_format.startswith("NC"): + data_format = "NCHW" + elif data_format.startswith("N") and data_format.endswith("C"): + data_format = "NHWC" + else: + raise ValueError("`data_format` must be of the form `N...C` or " + f"`NC...`. Received: data_format={data_format}") + + if not context.executing_eagerly(): + value = ops.convert_to_tensor(value, name="input") + bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") + + return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name) + + +def bias_add_v1(value, bias, name=None): + """Adds `bias` to `value`. + + This is a deprecated version of bias_add and will soon to be removed. + + This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. + Broadcasting is supported, so `value` may have any number of dimensions. + Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the + case where both types are quantized. + + Args: + value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, + `int16`, `int8`, `complex64`, or `complex128`. + bias: A 1-D `Tensor` with size matching the last dimension of `value`. 
+ Must be the same type as `value` unless `value` is a quantized type, + in which case a different quantized type may be used. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same type as `value`. + """ + with ops.name_scope(name, "BiasAddV1", [value, bias]) as name: + value = ops.convert_to_tensor(value, name="input") + bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") + return gen_nn_ops.bias_add_v1(value, bias, name=name) + + +@tf_export(v1=["nn.crelu"]) +@dispatch.add_dispatch_support +def crelu(features, name=None, axis=-1): + """Computes Concatenated ReLU. + + Concatenates a ReLU which selects only the positive part of the activation + with a ReLU which selects only the *negative* part of the activation. + Note that as a result this non-linearity doubles the depth of the activations. + Source: [Understanding and Improving Convolutional Neural Networks via + Concatenated Rectified Linear Units. W. Shang, et + al.](https://arxiv.org/abs/1603.05201) + + Args: + features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, + `int16`, or `int8`. + name: A name for the operation (optional). + axis: The axis that the output values are concatenated along. Default is -1. + + Returns: + A `Tensor` with the same type as `features`. 
+ + References: + Understanding and Improving Convolutional Neural Networks via Concatenated + Rectified Linear Units: + [Shang et al., 2016](http://proceedings.mlr.press/v48/shang16) + ([pdf](http://proceedings.mlr.press/v48/shang16.pdf)) + """ + with ops.name_scope(name, "CRelu", [features]) as name: + features = ops.convert_to_tensor(features, name="features") + c = array_ops.concat([features, -features], axis, name=name) # pylint: disable=invalid-unary-operand-type + return gen_nn_ops.relu(c) + + +@tf_export("nn.crelu", v1=[]) +@dispatch.add_dispatch_support +def crelu_v2(features, axis=-1, name=None): + return crelu(features, name=name, axis=axis) +crelu_v2.__doc__ = crelu.__doc__ + + +@tf_export("nn.relu6") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def relu6(features, name=None): + """Computes Rectified Linear 6: `min(max(features, 0), 6)`. + + In comparison with `tf.nn.relu`, relu6 activation functions have shown to + empirically perform better under low-precision conditions (e.g. fixed point + inference) by encouraging the model to learn sparse features earlier. + Source: [Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., + 2010](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf). + + For example: + + >>> x = tf.constant([-3.0, -1.0, 0.0, 6.0, 10.0], dtype=tf.float32) + >>> y = tf.nn.relu6(x) + >>> y.numpy() + array([0., 0., 0., 6., 6.], dtype=float32) + + Args: + features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, + `int16`, or `int8`. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same type as `features`. 
+ + References: + Convolutional Deep Belief Networks on CIFAR-10: + Krizhevsky et al., 2010 + ([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)) + """ + with ops.name_scope(name, "Relu6", [features]) as name: + features = ops.convert_to_tensor(features, name="features") + return gen_nn_ops.relu6(features, name=name) + + +@tf_export("nn.leaky_relu") +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def leaky_relu(features, alpha=0.2, name=None): + """Compute the Leaky ReLU activation function. + + Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models. + AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013] + (https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). + + Args: + features: A `Tensor` representing preactivation values. Must be one of + the following types: `float16`, `float32`, `float64`, `int32`, `int64`. + alpha: Slope of the activation function at x < 0. + name: A name for the operation (optional). + + Returns: + The activation value. + + References: + Rectifier Nonlinearities Improve Neural Network Acoustic Models: + [Maas et al., 2013] + (http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422) + ([pdf] + (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf)) + """ + with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name: + features = ops.convert_to_tensor(features, name="features") + if features.dtype.is_integer: + features = math_ops.cast(features, dtypes.float32) + if isinstance(alpha, np.ndarray): + alpha = alpha.item() + return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name) + + +@tf_export("nn.gelu", v1=[]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def gelu(features, approximate=False, name=None): + """Compute the Gaussian Error Linear Unit (GELU) activation function. + + Gaussian error linear unit (GELU) computes + `x * P(X <= x)`, where `P(X) ~ N(0, 1)`. 
+ The (GELU) nonlinearity weights inputs by their value, rather than gates + inputs by their sign as in ReLU. + + For example: + + >>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32) + >>> y = tf.nn.gelu(x) + >>> y.numpy() + array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ], + dtype=float32) + >>> y = tf.nn.gelu(x, approximate=True) + >>> y.numpy() + array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ], + dtype=float32) + + Args: + features: A `float Tensor` representing preactivation values. + approximate: An optional `bool`. Defaults to `False`. Whether to enable + approximation. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same type as `features`. + + Raises: + ValueError: if `features` is not a floating point `Tensor`. + + References: + [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415). + """ + with ops.name_scope(name, "Gelu", [features]): + features = ops.convert_to_tensor(features, name="features") + if not features.dtype.is_floating: + raise ValueError( + "`features.dtype` must be a floating point tensor." + f"Received:features.dtype={features.dtype}") + if approximate: + coeff = math_ops.cast(0.044715, features.dtype) + return 0.5 * features * ( + 1.0 + math_ops.tanh(0.7978845608028654 * + (features + coeff * math_ops.pow(features, 3)))) + else: + return 0.5 * features * (1.0 + math_ops.erf( + features / math_ops.cast(1.4142135623730951, features.dtype))) + + +def _flatten_outer_dims(logits): + """Flattens logits' outer dimensions and keep its last dimension.""" + rank = array_ops.rank(logits) + last_dim_size = array_ops.slice( + array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1]) + output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0)) + + # Set output shape if known. 
+ if not context.executing_eagerly(): + shape = logits.get_shape() + if shape is not None and shape.dims is not None: + shape = shape.as_list() + product = 1 + product_valid = True + for d in shape[:-1]: + if d is None: + product_valid = False + break + else: + product *= d + if product_valid: + output_shape = [product, shape[-1]] + output.set_shape(output_shape) + + return output + + +def _wrap_2d_function(inputs, compute_op, dim=-1, name=None): + """Helper function for ops that accept and return 2d inputs of same shape. + + It reshapes and transposes the inputs into a 2-D Tensor and then invokes + the given function. The output would be transposed and reshaped back. + If the given function returns a tuple of tensors, each of them will be + transposed and reshaped. + + Args: + inputs: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + compute_op: The function to wrap. Must accept the input tensor as its first + arugment, and a second keyword argument `name`. + dim: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same shape as inputs. If compute_op returns multiple + tensors, each of them have the same shape as the input. + Raises: + InvalidArgumentError: if `inputs` is empty or `dim` is beyond the last + dimension of `inputs`. + """ + + def _swap_axis(input_tensor, dim_index, last_index, name=None): + """Swaps logits's dim_index and last_index.""" + return array_ops.transpose( + input_tensor, + array_ops.concat([ + math_ops.range(dim_index), [last_index], + math_ops.range(dim_index + 1, last_index), [dim_index] + ], 0), + name=name) + + inputs = ops.convert_to_tensor(inputs) + + # We need its original shape for shape inference. 
+ shape = inputs.get_shape() + is_last_dim = (dim == -1) or (dim == shape.ndims - 1) + + if is_last_dim: + return compute_op(inputs, name=name) + + dim_val = dim + if isinstance(dim, tensor_lib.Tensor): + dim_val = tensor_util.constant_value(dim) + if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims: + raise errors_impl.InvalidArgumentError( + None, None, + f"`dim` must be in the range [{-shape.ndims}, {shape.ndims}) where " + f"{shape.ndims} is the number of dimensions in the input. " + f"Received: dim={dim_val}") + + # If dim is not the last dimension, we have to do a transpose so that we can + # still perform the op on its last dimension. + + # In case dim is negative (and is not last dimension -1), add shape.ndims + ndims = array_ops.rank(inputs) + if not isinstance(dim, tensor_lib.Tensor): + if dim < 0: + dim += ndims + else: + dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim) + + # Swap logits' dimension of dim and its last dimension. + input_rank = array_ops.rank(inputs) + dim_axis = dim % shape.ndims + inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1)) + + # Do the actual call on its last dimension. + def fix_output(output): + output = _swap_axis( + output, dim_axis, math_ops.subtract(input_rank, 1), name=name) + + # Make shape inference work since transpose may erase its static shape. + output.set_shape(shape) + return output + + outputs = compute_op(inputs) + if isinstance(outputs, tuple): + return tuple(fix_output(output) for output in outputs) + else: + return fix_output(outputs) + + +@tf_export("nn.softmax", "math.softmax", v1=[]) +@dispatch.add_dispatch_support +def softmax_v2(logits, axis=None, name=None): + """Computes softmax activations. + + Used for multi-class predictions. The sum of all outputs generated by softmax + is 1. 
+ + This function performs the equivalent of + + ```python + softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis, keepdims=True) + ``` + Example usage: + + >>> softmax = tf.nn.softmax([-1, 0., 1.]) + >>> softmax + + >>> sum(softmax) + + + Args: + logits: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + axis: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type and shape as `logits`. + + Raises: + InvalidArgumentError: if `logits` is empty or `axis` is beyond the last + dimension of `logits`. + """ + if axis is None: + axis = -1 + return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name) + + +@tf_export(v1=["nn.softmax", "math.softmax"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") +def softmax(logits, axis=None, name=None, dim=None): + axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) + if axis is None: + axis = -1 + return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name) + + +softmax.__doc__ = softmax_v2.__doc__ + + +@tf_export(v1=["nn.log_softmax", "math.log_softmax"]) +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") +def log_softmax(logits, axis=None, name=None, dim=None): + """Computes log softmax activations. + + For each batch `i` and class `j` we have + + logsoftmax = logits - log(reduce_sum(exp(logits), axis)) + + Args: + logits: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + axis: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + dim: Deprecated alias for `axis`. + + Returns: + A `Tensor`. 
Has the same type as `logits`. Same shape as `logits`. + + Raises: + InvalidArgumentError: if `logits` is empty or `axis` is beyond the last + dimension of `logits`. + """ + axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) + if axis is None: + axis = -1 + return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name) + + +@tf_export("nn.log_softmax", "math.log_softmax", v1=[]) +@dispatch.add_dispatch_support +def log_softmax_v2(logits, axis=None, name=None): + """Computes log softmax activations. + + For each batch `i` and class `j` we have + + logsoftmax = logits - log(reduce_sum(exp(logits), axis)) + + Args: + logits: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + axis: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `logits`. Same shape as `logits`. + + Raises: + InvalidArgumentError: if `logits` is empty or `axis` is beyond the last + dimension of `logits`. + """ + if axis is None: + axis = -1 + return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name) + + +def _ensure_xent_args(name, labels, logits): + if labels is None or logits is None: + raise ValueError(f"Both `labels` and `logits` must be provided for {name}" + f"Received: labels={labels} and logits={logits}") + + +@tf_export("nn.softmax_cross_entropy_with_logits", v1=[]) +@dispatch.add_dispatch_support +def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None): + """Computes softmax cross entropy between `logits` and `labels`. + + Measures the probability error in discrete classification tasks in which the + classes are mutually exclusive (each entry is in exactly one class). For + example, each CIFAR-10 image is labeled with one and only one label: an image + can be a dog or a truck, but not both. 
+ + **NOTE:** While the classes are mutually exclusive, their probabilities + need not be. All that is required is that each row of `labels` is + a valid probability distribution. If they are not, the computation of the + gradient will be incorrect. + + If using exclusive `labels` (wherein one and only + one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. + + Usage: + + >>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]] + >>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]] + >>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) + + + **WARNING:** This op expects unscaled logits, since it performs a `softmax` + on `logits` internally for efficiency. Do not call this op with the + output of `softmax`, as it will produce incorrect results. + + A common use case is to have logits and labels of shape + `[batch_size, num_classes]`, but higher dimensions are supported, with + the `axis` argument specifying the class dimension. + + `logits` and `labels` must have the same dtype (either `float16`, `float32`, + or `float64`). + + Backpropagation will happen into both `logits` and `labels`. To disallow + backpropagation into `labels`, pass label tensors through `tf.stop_gradient` + before feeding it to this function. + + **Note that to avoid confusion, it is required to pass only named arguments to + this function.** + + Args: + labels: Each vector along the class dimension should hold a valid + probability distribution e.g. for the case in which labels are of shape + `[batch_size, num_classes]`, each row of `labels[i]` must be a valid + probability distribution. + logits: Per-label activations, typically a linear output. These activation + energies are interpreted as unnormalized log probabilities. + axis: The class dimension. Defaulted to -1 which is the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor` that contains the softmax cross entropy loss. 
Its type is the + same as `logits` and its shape is the same as `labels` except that it does + not have the last dimension of `labels`. + """ + return softmax_cross_entropy_with_logits_v2_helper( + labels=labels, logits=logits, axis=axis, name=name) + + +@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"]) +@dispatch.add_dispatch_support +@deprecated_args(None, "dim is deprecated, use axis instead", "dim") +def softmax_cross_entropy_with_logits_v2_helper( + labels, logits, axis=None, name=None, dim=None): + """Computes softmax cross entropy between `logits` and `labels`. + + Measures the probability error in discrete classification tasks in which the + classes are mutually exclusive (each entry is in exactly one class). For + example, each CIFAR-10 image is labeled with one and only one label: an image + can be a dog or a truck, but not both. + + **NOTE:** While the classes are mutually exclusive, their probabilities + need not be. All that is required is that each row of `labels` is + a valid probability distribution. If they are not, the computation of the + gradient will be incorrect. + + If using exclusive `labels` (wherein one and only + one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. + + **WARNING:** This op expects unscaled logits, since it performs a `softmax` + on `logits` internally for efficiency. Do not call this op with the + output of `softmax`, as it will produce incorrect results. + + A common use case is to have logits and labels of shape + `[batch_size, num_classes]`, but higher dimensions are supported, with + the `axis` argument specifying the class dimension. + + `logits` and `labels` must have the same dtype (either `float16`, `float32`, + or `float64`). + + Backpropagation will happen into both `logits` and `labels`. To disallow + backpropagation into `labels`, pass label tensors through `tf.stop_gradient` + before feeding it to this function. 
+ + **Note that to avoid confusion, it is required to pass only named arguments to + this function.** + + Args: + labels: Each vector along the class dimension should hold a valid + probability distribution e.g. for the case in which labels are of shape + `[batch_size, num_classes]`, each row of `labels[i]` must be a valid + probability distribution. + logits: Unscaled log probabilities. + axis: The class dimension. Defaulted to -1 which is the last dimension. + name: A name for the operation (optional). + dim: Deprecated alias for axis. + + Returns: + A `Tensor` that contains the softmax cross entropy loss. Its type is the + same as `logits` and its shape is the same as `labels` except that it does + not have the last dimension of `labels`. + """ + # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This + # could break users who call this with bad labels, but disregard the bad + # results. + axis = deprecated_argument_lookup("axis", axis, "dim", dim) + del dim + if axis is None: + axis = -1 + + with ops.name_scope(name, "softmax_cross_entropy_with_logits", + [logits, labels]) as name: + logits = ops.convert_to_tensor(logits, name="logits") + labels = ops.convert_to_tensor(labels, name="labels") + convert_to_float32 = ( + logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16) + precise_logits = math_ops.cast( + logits, dtypes.float32) if convert_to_float32 else logits + # labels and logits must be of the same type + labels = math_ops.cast(labels, precise_logits.dtype) + input_rank = array_ops.rank(precise_logits) + # For shape inference. + shape = logits.get_shape() + + # Move the dim to the end if dim is not the last dimension. 
+ if axis != -1: + + def _move_dim_to_end(tensor, dim_index, rank): + return array_ops.transpose( + tensor, + array_ops.concat([ + math_ops.range(dim_index), + math_ops.range(dim_index + 1, rank), [dim_index] + ], 0)) + + precise_logits = _move_dim_to_end(precise_logits, axis, input_rank) + labels = _move_dim_to_end(labels, axis, input_rank) + + input_shape = array_ops.shape(precise_logits) + + # Make precise_logits and labels into matrices. + precise_logits = _flatten_outer_dims(precise_logits) + labels = _flatten_outer_dims(labels) + + # Do the actual op computation. + if config.is_op_determinism_enabled(): + log_probs = log_softmax_v2(precise_logits) + cost = -math_ops.reduce_sum(labels * log_probs, axis=1) + else: + # The second output tensor contains the gradients. We use it in + # CrossEntropyGrad() in nn_grad but not here. + cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits( + precise_logits, labels, name=name) + + # The output cost shape should be the input minus axis. + output_shape = array_ops.slice(input_shape, [0], + [math_ops.subtract(input_rank, 1)]) + cost = array_ops.reshape(cost, output_shape) + + # Make shape inference work since reshape and transpose may erase its static + # shape. + if not context.executing_eagerly( + ) and shape is not None and shape.dims is not None: + shape = shape.as_list() + del shape[axis] + cost.set_shape(shape) + + if convert_to_float32: + return math_ops.cast(cost, logits.dtype) + else: + return cost + + +_XENT_DEPRECATION = """ +Future major versions of TensorFlow will allow gradients to flow +into the labels input on backprop by default. + +See `tf.nn.softmax_cross_entropy_with_logits_v2`. 
+""" + + +@tf_export(v1=["nn.softmax_cross_entropy_with_logits"]) +@dispatch.add_dispatch_support +@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION) +def softmax_cross_entropy_with_logits( + labels=None, + logits=None, + dim=-1, + name=None, + axis=None): + """Computes softmax cross entropy between `logits` and `labels`. + + Measures the probability error in discrete classification tasks in which the + classes are mutually exclusive (each entry is in exactly one class). For + example, each CIFAR-10 image is labeled with one and only one label: an image + can be a dog or a truck, but not both. + + **NOTE:** While the classes are mutually exclusive, their probabilities + need not be. All that is required is that each row of `labels` is + a valid probability distribution. If they are not, the computation of the + gradient will be incorrect. + + If using exclusive `labels` (wherein one and only + one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. + + **WARNING:** This op expects unscaled logits, since it performs a `softmax` + on `logits` internally for efficiency. Do not call this op with the + output of `softmax`, as it will produce incorrect results. + + A common use case is to have logits and labels of shape + `[batch_size, num_classes]`, but higher dimensions are supported, with + the `dim` argument specifying the class dimension. + + Backpropagation will happen only into `logits`. To calculate a cross entropy + loss that allows backpropagation into both `logits` and `labels`, see + `tf.nn.softmax_cross_entropy_with_logits_v2`. + + **Note that to avoid confusion, it is required to pass only named arguments to + this function.** + + Args: + labels: Each vector along the class dimension should hold a valid + probability distribution e.g. for the case in which labels are of shape + `[batch_size, num_classes]`, each row of `labels[i]` must be a valid + probability distribution. 
+ logits: Per-label activations, typically a linear output. These activation + energies are interpreted as unnormalized log probabilities. + dim: The class dimension. Defaulted to -1 which is the last dimension. + name: A name for the operation (optional). + axis: Alias for dim. + + Returns: + A `Tensor` that contains the softmax cross entropy loss. Its type is the + same as `logits` and its shape is the same as `labels` except that it does + not have the last dimension of `labels`. + """ + dim = deprecated_argument_lookup("axis", axis, "dim", dim) + _ensure_xent_args("softmax_cross_entropy_with_logits", labels, logits) + + with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", + [logits, labels]) as name: + labels = array_ops.stop_gradient(labels, name="labels_stop_gradient") + + return softmax_cross_entropy_with_logits_v2( + labels=labels, logits=logits, axis=dim, name=name) + + +def _sparse_softmax_cross_entropy_with_rank_2_logits(logits, labels, name): + if config.is_op_determinism_enabled(): + # TODO(duncanriach): Implement a GPU-deterministic version of this op at + # the C++/CUDA level. + + # The actual op functionality + log_probs = log_softmax_v2(logits) + cost = math_ops.negative(array_ops.gather(log_probs, labels, batch_dims=1)) + + # Force the output to be NaN when the corresponding label is invalid. + # Without the selective gradient gating provided by the following code, + # backprop into the actual op functionality above, when there are invalid + # labels, leads to corruption of the gradients associated with valid labels. + # TODO(duncanriach): Uncover the source of the aforementioned corruption. 
+ nan_tensor = constant_op.constant(float("Nan"), dtype=logits.dtype) + cost_all_nans = array_ops.broadcast_to(nan_tensor, array_ops.shape(cost)) + class_count = math_ops.cast(array_ops.shape(logits)[-1], labels.dtype) + cost = array_ops.where( + math_ops.logical_or( + math_ops.less(labels, 0), + math_ops.greater_equal(labels, class_count)), cost_all_nans, cost) + else: + # The second output tensor contains the gradients. We use it in + # _CrossEntropyGrad() in nn_grad but not here. + cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( + logits, labels, name=name) + return cost + + +@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"]) +@dispatch.add_dispatch_support +def sparse_softmax_cross_entropy_with_logits( + labels=None, + logits=None, + name=None): + """Computes sparse softmax cross entropy between `logits` and `labels`. + + Measures the probability error in discrete classification tasks in which the + classes are mutually exclusive (each entry is in exactly one class). For + example, each CIFAR-10 image is labeled with one and only one label: an image + can be a dog or a truck, but not both. + + **NOTE:** For this operation, the probability of a given label is considered + exclusive. That is, soft classes are not allowed, and the `labels` vector + must provide a single specific index for the true class for each row of + `logits` (each minibatch entry). For soft softmax classification with + a probability distribution for each entry, see + `softmax_cross_entropy_with_logits_v2`. + + **WARNING:** This op expects unscaled logits, since it performs a `softmax` + on `logits` internally for efficiency. Do not call this op with the + output of `softmax`, as it will produce incorrect results. + + A common use case is to have logits of shape + `[batch_size, num_classes]` and have labels of shape + `[batch_size]`, but higher dimensions are supported, in which + case the `dim`-th dimension is assumed to be of size `num_classes`. 
+ `logits` must have the dtype of `float16`, `float32`, or `float64`, and + `labels` must have the dtype of `int32` or `int64`. + + **Note that to avoid confusion, it is required to pass only named arguments to + this function.** + + Args: + labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of + `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` + must be an index in `[0, num_classes)`. Other values will raise an + exception when this op is run on CPU, and return `NaN` for corresponding + loss and gradient rows on GPU. + logits: Per-label activations (typically a linear output) of shape + `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or + `float64`. These activation energies are interpreted as unnormalized log + probabilities. + name: A name for the operation (optional). + + Returns: + A `Tensor` of the same shape as `labels` and of the same type as `logits` + with the softmax cross entropy loss. + + Raises: + ValueError: If logits are scalars (need to have rank >= 1) or if the rank + of the labels is not equal to the rank of the logits minus one. + """ + _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", labels, logits) + + # TODO(pcmurray) Raise an error when the label is not an index in + # [0, num_classes). Note: This could break users who call this with bad + # labels, but disregard the bad results. + + # Reshape logits and labels to rank 2. + with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits", + [labels, logits]): + labels = ops.convert_to_tensor(labels) + logits = ops.convert_to_tensor(logits) + precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype( + logits.dtype) == dtypes.float16) else logits + + # Store label shape for result later. 
+ labels_static_shape = labels.get_shape() + labels_shape = array_ops.shape(labels) + static_shapes_fully_defined = ( + labels_static_shape.is_fully_defined() and + logits.get_shape()[:-1].is_fully_defined()) + if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0: + raise ValueError( + f"`logits` cannot be a scalar. Received logits={logits}`") + if logits.get_shape().ndims is not None and ( + labels_static_shape.ndims is not None and + labels_static_shape.ndims != logits.get_shape().ndims - 1): + raise ValueError( + "`labels.shape.rank` must equal `logits.shape.rank - 1`. " + f"Received: labels.shape={labels_static_shape} of rank " + f"{labels_static_shape.rank} and logits.shape={logits.get_shape()} " + f"of rank {logits.get_shape().rank}") + if (static_shapes_fully_defined and + labels_static_shape != logits.get_shape()[:-1]): + raise ValueError( + "`labels.shape` must equal `logits.shape` except for " + f"the last dimension. Received: labels.shape={labels_static_shape} " + f"and logits.shape={logits.get_shape()}") + # Check if no reshapes are required. + if logits.get_shape().ndims == 2: + cost = _sparse_softmax_cross_entropy_with_rank_2_logits( + precise_logits, labels, name=name) + if logits.dtype == dtypes.float16: + return math_ops.cast(cost, dtypes.float16) + else: + return cost + + # Perform a check of the dynamic shapes if the static shapes are not fully + # defined. + shape_checks = [] + if not static_shapes_fully_defined: + shape_checks.append( + check_ops.assert_equal( + array_ops.shape(labels), + array_ops.shape(logits)[:-1])) + with ops.control_dependencies(shape_checks): + # Reshape logits to 2 dim, labels to 1 dim. 
+ num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1] + precise_logits = array_ops.reshape(precise_logits, [-1, num_classes]) + labels = array_ops.reshape(labels, [-1]) + cost = _sparse_softmax_cross_entropy_with_rank_2_logits( + precise_logits, labels, name=name) + cost = array_ops.reshape(cost, labels_shape) + cost.set_shape(labels_static_shape) + if logits.dtype == dtypes.float16: + return math_ops.cast(cost, dtypes.float16) + else: + return cost + + +@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[]) +@dispatch.add_dispatch_support +def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None): + """Computes sparse softmax cross entropy between `logits` and `labels`. + + Measures the probability error in discrete classification tasks in which the + classes are mutually exclusive (each entry is in exactly one class). For + example, each CIFAR-10 image is labeled with one and only one label: an image + can be a dog or a truck, but not both. + + Note: For this operation, the probability of a given label is considered + exclusive. That is, soft classes are not allowed, and the `labels` vector + must provide a single specific index for the true class for each row of + `logits` (each minibatch entry). For soft softmax classification with + a probability distribution for each entry, see + `softmax_cross_entropy_with_logits_v2`. + + Warning: This op expects unscaled logits, since it performs a `softmax` + on `logits` internally for efficiency. Do not call this op with the + output of `softmax`, as it will produce incorrect results. + + A common use case is to have logits of shape + `[batch_size, num_classes]` and have labels of shape + `[batch_size]`, but higher dimensions are supported, in which + case the `dim`-th dimension is assumed to be of size `num_classes`. + `logits` must have the dtype of `float16`, `float32`, or `float64`, and + `labels` must have the dtype of `int32` or `int64`. 
+ + >>> logits = tf.constant([[2., -5., .5, -.1], + ... [0., 0., 1.9, 1.4], + ... [-100., 100., -100., -100.]]) + >>> labels = tf.constant([0, 3, 1]) + >>> tf.nn.sparse_softmax_cross_entropy_with_logits( + ... labels=labels, logits=logits).numpy() + array([0.29750752, 1.1448325 , 0. ], dtype=float32) + + To avoid confusion, passing only named arguments to this function is + recommended. + + Args: + labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of + `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` + must be an index in `[0, num_classes)`. Other values will raise an + exception when this op is run on CPU, and return `NaN` for corresponding + loss and gradient rows on GPU. + logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, + num_classes]` and dtype `float16`, `float32`, or `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of the same shape as `labels` and of the same type as `logits` + with the softmax cross entropy loss. + + Raises: + ValueError: If logits are scalars (need to have rank >= 1) or if the rank + of the labels is not equal to the rank of the logits minus one. + """ + return sparse_softmax_cross_entropy_with_logits( + labels=labels, logits=logits, name=name) + + +@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"]) +@dispatch.add_dispatch_support +def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin + """Performs the avg pooling on the input. + + Each entry in `output` is the mean of the corresponding size `ksize` + window in `value`. + + Args: + input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + + [num_channels]` if `data_format` does not start with "NC" (default), or + `[batch_size, num_channels] + input_spatial_shape` if data_format starts + with "NC". Pooling happens over the spatial dimensions only. + ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. 
The size + of the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The + stride of the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string. Specifies the channel dimension. For N=1 it can be + either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) + or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". + name: Optional name for the operation. + + Returns: + A `Tensor` of format specified by `data_format`. + The average pooled output tensor. + """ + if input.shape is not None: + n = len(input.shape) - 2 + elif data_format is not None: + n = len(data_format) - 2 + else: + raise ValueError( + "`input` must have a static shape or `data_format` must be given. " + f"Received: input.shape={input.shape} and " + f"data_format={data_format}") + if not 1 <= n <= 3: + raise ValueError( + f"`input.shape.rank` must be 3, 4 or 5. Received: " + f"input.shape={input.shape} of rank {n + 2}.") + + if data_format is None: + channel_index = n + 1 + else: + channel_index = 1 if data_format.startswith("NC") else n + 1 + + ksize = _get_sequence(ksize, n, channel_index, "ksize") + strides = _get_sequence(strides, n, channel_index, "strides") + + avg_pooling_ops = { + 1: avg_pool1d, + 2: gen_nn_ops.avg_pool, + 3: gen_nn_ops.avg_pool3d + } + + op = avg_pooling_ops[n] + return op( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) + + +@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"]) +@dispatch.add_dispatch_support +def avg_pool(value, ksize, strides, padding, data_format="NHWC", + name=None, input=None): # pylint: disable=redefined-builtin + """Performs the average pooling on the input. 
+ + Each entry in `output` is the mean of the corresponding size `ksize` + window in `value`. + + Args: + value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type + `float32`, `float64`, `qint8`, `quint8`, or `qint32`. + ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of + the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `2` or `4`. The + stride of the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. + See the "returns" section of `tf.nn.convolution` for details. + data_format: A string. 'NHWC' and 'NCHW' are supported. + name: Optional name for the operation. + input: Alias for value. + + Returns: + A `Tensor` with the same type as `value`. The average pooled output tensor. + """ + with ops.name_scope(name, "AvgPool", [value]) as name: + value = deprecation.deprecated_argument_lookup( + "input", input, "value", value) + + if data_format is None: + data_format = "NHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + ksize = _get_sequence(ksize, 2, channel_index, "ksize") + strides = _get_sequence(strides, 2, channel_index, "strides") + + return gen_nn_ops.avg_pool( + value, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) + + +@tf_export("nn.avg_pool2d", v1=[]) +@dispatch.add_dispatch_support +def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin + """Performs the average pooling on the input. + + Each entry in `output` is the mean of the corresponding size `ksize` + window in `value`. + + Args: + input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type + `float32`, `float64`, `qint8`, `quint8`, or `qint32`. + ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of + the window for each dimension of the input tensor. 
+ strides: An int or list of `ints` that has length `1`, `2` or `4`. The + stride of the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string. 'NHWC' and 'NCHW' are supported. + name: Optional name for the operation. + + Returns: + A `Tensor` with the same type as `value`. The average pooled output tensor. + """ + with ops.name_scope(name, "AvgPool2D", [input]) as name: + if data_format is None: + data_format = "NHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + ksize = _get_sequence(ksize, 2, channel_index, "ksize") + strides = _get_sequence(strides, 2, channel_index, "strides") + + return gen_nn_ops.avg_pool( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) + + +@tf_export("nn.avg_pool1d") +@dispatch.add_dispatch_support +def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin + """Performs the average pooling on the input. + + Each entry in `output` is the mean of the corresponding size `ksize` + window in `value`. + + Note internally this op reshapes and uses the underlying 2d operation. + + Args: + input: A 3-D `Tensor` of the format specified by `data_format`. + ksize: An int or list of `ints` that has length `1` or `3`. The size of the + window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1` or `3`. The stride of + the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". + name: A name for the operation (optional). 
+ + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. + """ + with ops.name_scope(name, "AvgPool1D", [input]) as name: + if data_format is None: + data_format = "NWC" + channel_index = 1 if data_format.startswith("NC") else 2 + ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize") + strides = [1] + _get_sequence(strides, 1, channel_index, "strides") + + expanding_dim = 1 if data_format == "NWC" else 2 + data_format = "NHWC" if data_format == "NWC" else "NCHW" + + input = array_ops.expand_dims_v2(input, expanding_dim) + result = gen_nn_ops.avg_pool( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) + return array_ops.squeeze(result, expanding_dim) + + +@tf_export("nn.avg_pool3d") +@dispatch.add_dispatch_support +def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin + """Performs the average pooling on the input. + + Each entry in `output` is the mean of the corresponding size `ksize` + window in `value`. + + Args: + input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]` + and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. + ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of + the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `3` or `5`. The + stride of the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A string. 'NDHWC' and 'NCDHW' are supported. + name: Optional name for the operation. + + Returns: + A `Tensor` with the same type as `value`. The average pooled output tensor. 
+ """ + with ops.name_scope(name, "AvgPool3D", [input]) as name: + if data_format is None: + data_format = "NDHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + ksize = _get_sequence(ksize, 3, channel_index, "ksize") + strides = _get_sequence(strides, 3, channel_index, "strides") + + return gen_nn_ops.avg_pool3d( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) + + +# pylint: disable=redefined-builtin +@tf_export("nn.max_pool", v1=["nn.max_pool_v2"]) +@dispatch.add_dispatch_support +def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None): + """Performs max pooling on the input. + + For a given window of `ksize`, takes the maximum value within that window. + Used for reducing computation and preventing overfitting. + + Consider an example of pooling with 2x2, non-overlapping windows: + + >>> matrix = tf.constant([ + ... [0, 0, 1, 7], + ... [0, 2, 0, 0], + ... [5, 2, 0, 0], + ... [0, 0, 9, 8], + ... ]) + >>> reshaped = tf.reshape(matrix, (1, 4, 4, 1)) + >>> tf.nn.max_pool(reshaped, ksize=2, strides=2, padding="SAME") + + + We can adjust the window size using the `ksize` parameter. For example, if we + were to expand the window to 3: + + >>> tf.nn.max_pool(reshaped, ksize=3, strides=2, padding="SAME") + + + We've now picked up two additional large numbers (5 and 9) in two of the + pooled spots. + + Note that our windows are now overlapping, since we're still moving by 2 units + on each iteration. This is causing us to see the same 9 repeated twice, since + it is part of two overlapping windows. + + We can adjust how far we move our window with each iteration using the + `strides` parameter. 
Updating this to the same value as our window size + eliminates the overlap: + + >>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="SAME") + + + Because the window does not neatly fit into our input, padding is added around + the edges, giving us the same result as when we used a 2x2 window. We can skip + padding altogether and simply drop the windows that do not fully fit into our + input by instead passing `"VALID"` to the `padding` argument: + + >>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="VALID") + + + Now we've grabbed the largest value in the 3x3 window starting from the upper- + left corner. Since no other windows fit in our input, they are dropped. + + Args: + input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + + [num_channels]` if `data_format` does not start with "NC" (default), or + `[batch_size, num_channels] + input_spatial_shape` if data_format starts + with "NC". Pooling happens over the spatial dimensions only. + ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size + of the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The + stride of the sliding window for each dimension of the input tensor. + padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of + padding algorithm to use, or a list indicating the explicit paddings at + the start and end of each dimension. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. When explicit padding is used and data_format is + `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit + padding, the size of the paddings cannot be greater than the sliding + window size. + data_format: A string. 
Specifies the channel dimension. For N=1 it can be + either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) + or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". + name: Optional name for the operation. + + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. + + Raises: + ValueError: If + - explicit padding is used with an input tensor of rank 5. + - explicit padding is used with data_format='NCHW_VECT_C'. + """ + if input.shape is not None: + n = len(input.shape) - 2 + elif data_format is not None: + n = len(data_format) - 2 + else: + raise ValueError( + "`input` must have a static shape or a data format must be given. " + f"Received: input.shape={input.shape} and " + f"data_format={data_format}") + if not 1 <= n <= 3: + raise ValueError( + f"`input.shape.rank` must be 3, 4 or 5. Received: " + f"input.shape={input.shape} of rank {n + 2}.") + if data_format is None: + channel_index = n + 1 + else: + channel_index = 1 if data_format.startswith("NC") else n + 1 + + if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": + raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " + f"explicit padding. Received: padding={padding}") + + ksize = _get_sequence(ksize, n, channel_index, "ksize") + strides = _get_sequence(strides, n, channel_index, "strides") + + if (isinstance(padding, (list, tuple)) and n == 3): + raise ValueError("Explicit padding is not supported with an input " + f"tensor of rank 5. 
Received: padding={padding}") + + max_pooling_ops = { + 1: max_pool1d, + 2: max_pool2d, + 3: gen_nn_ops.max_pool3d + } + + op = max_pooling_ops[n] + return op( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) +# pylint: enable=redefined-builtin + + +@tf_export(v1=["nn.max_pool"]) +@dispatch.add_dispatch_support +def max_pool(value, + ksize, + strides, + padding, + data_format="NHWC", + name=None, + input=None): # pylint: disable=redefined-builtin + """Performs the max pooling on the input. + + Args: + value: A 4-D `Tensor` of the format specified by `data_format`. + ksize: An int or list of `ints` that has length `1`, `2` or `4`. + The size of the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `2` or `4`. + The stride of the sliding window for each dimension of the input tensor. + padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of + padding algorithm to use, or a list indicating the explicit paddings at + the start and end of each dimension. When explicit padding is used and + data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, + pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used + and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit + padding, the size of the paddings cannot be greater than the sliding + window size. + data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. + name: Optional name for the operation. + input: Alias for value. + + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. 
+ """ + value = deprecation.deprecated_argument_lookup("input", input, "value", value) + with ops.name_scope(name, "MaxPool", [value]) as name: + if data_format is None: + data_format = "NHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + ksize = _get_sequence(ksize, 2, channel_index, "ksize") + strides = _get_sequence(strides, 2, channel_index, "strides") + if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": + raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " + f"explicit padding. Received: padding={padding}") + padding, explicit_paddings = convert_padding(padding) + if ((np.isscalar(ksize) and ksize == 0) or + (isinstance(ksize, + (list, tuple, np.ndarray)) and any(v == 0 for v in ksize))): + raise ValueError(f"`ksize` cannot be zero. Received: ksize={ksize}") + + return gen_nn_ops.max_pool( + value, + ksize=ksize, + strides=strides, + padding=padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + name=name) + + +# pylint: disable=redefined-builtin +@tf_export("nn.max_pool1d") +@dispatch.add_dispatch_support +def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): + """Performs the max pooling on the input. + + Note internally this op reshapes and uses the underlying 2d operation. + + Args: + input: A 3-D `Tensor` of the format specified by `data_format`. + ksize: An int or list of `ints` that has length `1` or `3`. The size of the + window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1` or `3`. The stride of + the sliding window for each dimension of the input tensor. + padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of + padding algorithm to use, or a list indicating the explicit paddings at + the start and end of each dimension. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. 
When explicit padding is used and data_format is + `"NWC"`, this should be in the form `[[0, 0], [pad_left, pad_right], [0, + 0]]`. When explicit padding used and data_format is `"NCW"`, this should + be in the form `[[0, 0], [0, 0], [pad_left, pad_right]]`. When using + explicit padding, the size of the paddings cannot be greater than the + sliding window size. + data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". + name: A name for the operation (optional). + + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. + """ + with ops.name_scope(name, "MaxPool1d", [input]) as name: + if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": + raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " + f"explicit padding. Received: padding={padding}") + if data_format is None: + data_format = "NWC" + channel_index = 1 if data_format.startswith("NC") else 2 + ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize") + strides = [1] + _get_sequence(strides, 1, channel_index, "strides") + padding, explicit_paddings = convert_padding(padding, 3) + if padding == "EXPLICIT": + explicit_paddings = [0, 0] + explicit_paddings + + expanding_dim = 1 if data_format == "NWC" else 2 + data_format = "NHWC" if data_format == "NWC" else "NCHW" + + input = array_ops.expand_dims_v2(input, expanding_dim) + result = gen_nn_ops.max_pool( + input, + ksize=ksize, + strides=strides, + padding=padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + name=name) + return array_ops.squeeze(result, expanding_dim) +# pylint: enable=redefined-builtin + + +# pylint: disable=redefined-builtin +@tf_export("nn.max_pool2d") +@dispatch.add_dispatch_support +def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): + """Performs max pooling on 2D spatial data such as images. 
+ + This is a more specific version of `tf.nn.max_pool` where the input tensor + is 4D, representing 2D spatial data such as images. Using these APIs are + equivalent + + Downsamples the input images along theirs spatial dimensions (height and + width) by taking its maximum over an input window defined by `ksize`. + The window is shifted by `strides` along each dimension. + + For example, for `strides=(2, 2)` and `padding=VALID` windows that extend + outside of the input are not included in the output: + + >>> x = tf.constant([[1., 2., 3., 4.], + ... [5., 6., 7., 8.], + ... [9., 10., 11., 12.]]) + >>> # Add the `batch` and `channels` dimensions. + >>> x = x[tf.newaxis, :, :, tf.newaxis] + >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2), + ... padding="VALID") + >>> result[0, :, :, 0] + + + With `padding=SAME`, we get: + + >>> x = tf.constant([[1., 2., 3., 4.], + ... [5., 6., 7., 8.], + ... [9., 10., 11., 12.]]) + >>> x = x[tf.newaxis, :, :, tf.newaxis] + >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2), + ... padding='SAME') + >>> result[0, :, :, 0] + + + We can also specify padding explicitly. The following example adds width-1 + padding on all sides (top, bottom, left, right): + + >>> x = tf.constant([[1., 2., 3., 4.], + ... [5., 6., 7., 8.], + ... [9., 10., 11., 12.]]) + >>> x = x[tf.newaxis, :, :, tf.newaxis] + >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2), + ... padding=[[0, 0], [1, 1], [1, 1], [0, 0]]) + >>> result[0, :, :, 0] + + + For more examples and detail, see `tf.nn.max_pool`. + + Args: + input: A 4-D `Tensor` of the format specified by `data_format`. + ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of + the window for each dimension of the input tensor. If only one integer is + specified, then we apply the same window for all 4 dims. If two are + provided then we use those for H, W dimensions and keep N, C dimension + window size = 1. 
+ strides: An int or list of `ints` that has length `1`, `2` or `4`. The + stride of the sliding window for each dimension of the input tensor. If + only one integer is specified, we apply the same stride to all 4 dims. If + two are provided we use those for the H, W dimensions and keep N, C of + stride = 1. + padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of + padding algorithm to use, or a list indicating the explicit paddings at + the start and end of each dimension. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. When explicit padding is used and data_format is + `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], + [pad_left, pad_right], [0, 0]]`. When explicit padding used and + data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], + [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit + padding, the size of the paddings cannot be greater than the sliding + window size. + data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. + name: Optional name for the operation. + + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. + + Raises: + ValueError: If explicit padding is used with data_format='NCHW_VECT_C'. + """ + with ops.name_scope(name, "MaxPool2d", [input]) as name: + if data_format is None: + data_format = "NHWC" + channel_index = 1 if data_format.startswith("NC") else 3 + + ksize = _get_sequence(ksize, 2, channel_index, "ksize") + strides = _get_sequence(strides, 2, channel_index, "strides") + if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": + raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " + f"explicit padding. 
Received: padding={padding}") + padding, explicit_paddings = convert_padding(padding) + + return gen_nn_ops.max_pool( + input, + ksize=ksize, + strides=strides, + padding=padding, + explicit_paddings=explicit_paddings, + data_format=data_format, + name=name) +# pylint: enable=redefined-builtin + + +# pylint: disable=redefined-builtin +@tf_export("nn.max_pool3d") +@dispatch.add_dispatch_support +def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): + """Performs the max pooling on the input. + + Args: + input: A 5-D `Tensor` of the format specified by `data_format`. + ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of + the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `3` or `5`. The + stride of the sliding window for each dimension of the input tensor. + padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". + The data format of the input and output data. With the default format + "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, + in_width, in_channels]. Alternatively, the format could be "NCDHW", the + data storage order is: [batch, in_channels, in_depth, in_height, + in_width]. + name: A name for the operation (optional). + + Returns: + A `Tensor` of format specified by `data_format`. + The max pooled output tensor. 
+ """ + with ops.name_scope(name, "MaxPool3D", [input]) as name: + if data_format is None: + data_format = "NDHWC" + channel_index = 1 if data_format.startswith("NC") else 4 + + ksize = _get_sequence(ksize, 3, channel_index, "ksize") + strides = _get_sequence(strides, 3, channel_index, "strides") + + return gen_nn_ops.max_pool3d( + input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + name=name) +# pylint: enable=redefined-builtin + + +@tf_export("nn.max_pool_with_argmax", v1=[]) +@dispatch.add_dispatch_support +def max_pool_with_argmax_v2( + input, # pylint: disable=redefined-builtin + ksize, + strides, + padding, + data_format="NHWC", + output_dtype=dtypes.int64, + include_batch_in_index=False, + name=None): + """Performs max pooling on the input and outputs both max values and indices. + + The indices in `argmax` are flattened, so that a maximum value at position + `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if + `include_batch_in_index` is False; + `((b * height + y) * width + x) * channels + c` + if `include_batch_in_index` is True. + + The indices returned are always in `[0, height) x [0, width)` before + flattening, even if padding is involved and the mathematically correct answer + is outside (either negative or too large). This is a bug, but fixing it is + difficult to do in a safe backwards compatible way, especially due to + flattening. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, + `uint32`, `uint64`. + 4-D with shape `[batch, height, width, channels]`. Input to pool over. + ksize: An int or list of `ints` that has length `1`, `2` or `4`. + The size of the window for each dimension of the input tensor. + strides: An int or list of `ints` that has length `1`, `2` or `4`. + The stride of the sliding window for each dimension of the + input tensor. 
+ padding: A `string` from: `"SAME", "VALID"`. + The type of padding algorithm to use. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: An optional `string`, must be set to `"NHWC"`. Defaults to + `"NHWC"`. + Specify the data format of the input and output data. + output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. + Defaults to `tf.int64`. + The dtype of the returned argmax tensor. + include_batch_in_index: An optional `boolean`. Defaults to `False`. + Whether to include batch dimension in flattened index of `argmax`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, argmax). + + output: A `Tensor`. Has the same type as `input`. + argmax: A `Tensor` of type `output_dtype`. + """ + + if data_format != "NHWC": + raise ValueError("`data_format` values other than 'NHWC' are not " + f"supported. Received: data_format={data_format}") + + ksize = _get_sequence(ksize, 2, 3, "ksize") + strides = _get_sequence(strides, 2, 3, "strides") + + return gen_nn_ops.max_pool_with_argmax( + input=input, + ksize=ksize, + strides=strides, + padding=padding, + Targmax=output_dtype, + include_batch_in_index=include_batch_in_index, + name=name) + + +@tf_export(v1=["nn.max_pool_with_argmax"]) +@dispatch.add_dispatch_support +def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name + input, # pylint: disable=redefined-builtin + ksize, + strides, + padding, + data_format="NHWC", + Targmax=None, + name=None, + output_dtype=None, + include_batch_in_index=False): + if data_format != "NHWC": + raise ValueError("`data_format` values other than 'NHWC' are not " + f"supported. 
Received: data_format={data_format}") + + Targmax = deprecated_argument_lookup( + "output_dtype", output_dtype, "Targmax", Targmax) + if Targmax is None: + Targmax = dtypes.int64 + return gen_nn_ops.max_pool_with_argmax( + input=input, + ksize=ksize, + strides=strides, + padding=padding, + Targmax=Targmax, + include_batch_in_index=include_batch_in_index, + name=name) + + +max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__ + + +@ops.RegisterStatistics("Conv3D", "flops") +def _calc_conv3d_flops(graph, node): + """Calculates the compute resources needed for Conv3D.""" + input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) + input_shape.assert_is_fully_defined() + filter_shape = graph_util.tensor_shape_from_node_def_name( + graph, node.input[1]) + filter_shape.assert_is_fully_defined() + output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) + output_shape.assert_is_fully_defined() + filter_time = int(filter_shape[0]) + filter_height = int(filter_shape[1]) + filter_width = int(filter_shape[2]) + filter_in_depth = int(filter_shape[3]) + output_count = np.prod(output_shape.as_list(), dtype=np.int64) + return ops.OpStats("flops", (output_count * filter_in_depth * filter_time * + filter_height * filter_width * 2)) + + +@ops.RegisterStatistics("Conv2D", "flops") +def _calc_conv_flops(graph, node): + """Calculates the compute resources needed for Conv2D.""" + input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) + input_shape.assert_is_fully_defined() + filter_shape = graph_util.tensor_shape_from_node_def_name( + graph, node.input[1]) + filter_shape.assert_is_fully_defined() + output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) + output_shape.assert_is_fully_defined() + filter_height = int(filter_shape[0]) + filter_width = int(filter_shape[1]) + filter_in_depth = int(filter_shape[2]) + output_count = np.prod(output_shape.as_list(), dtype=np.int64) + return 
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Depthwise convolution applies one filter per channel, so the cost per
  # output element is just the spatial filter size (no in-depth factor).
  f_height = int(filter_shape[0])
  f_width = int(filter_shape[1])
  n_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  # One multiply-add (2 flops) per filter tap per output element.
  return ops.OpStats("flops", n_outputs * f_height * f_width * 2)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x_t = ops.convert_to_tensor(x, name="x")
    w_t = ops.convert_to_tensor(weights, name="weights")
    b_t = ops.convert_to_tensor(biases, name="biases")
    # Note: bias_add_v1 (unlike bias_add) does not support a data_format
    # argument; this wrapper exists only for backward compatibility.
    product = math_ops.matmul(x_t, w_t)
    return bias_add_v1(product, b_t, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the shape of the random keep/drop mask used by dropout.

  Returns the dynamic shape of `x` when `noise_shape` is `None`; otherwise
  makes a best effort to substitute statically-known dimensions of `x` for
  unknown dimensions of `noise_shape`.
  """
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to figure out the intended shape.
    static_shape = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not statically interpretable — let the op validate it (in eager mode
    # any error surfaces there).
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is not None and len(x_dims) == len(static_shape.dims):
    # Fill each unknown noise dimension from x's statically-known dimension.
    merged = [
        x_dim.value
        if noise_dim.value is None and x_dim.value is not None
        else noise_dim.value
        for x_dim, noise_dim in zip(x_dims, static_shape.dims)
    ]
    return tensor_shape.TensorShape(merged)

  return noise_shape
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """Computes dropout.

  With probability `rate`, each element of `x` is set to `0`; the surviving
  elements are scaled up by `1 / (1-rate)` so that the expected sum is
  unchanged.

  By default, each element is kept or dropped independently.  If
  `noise_shape` is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with
  `noise_shape[i] == shape(x)[i]` make independent decisions, which allows
  e.g. keeping or dropping whole rows or channels together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D integer `Tensor`, representing the shape for randomly
      generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`.  The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  # Translate the legacy `keep_prob` into its `rate` equivalent before the
  # deprecation lookup arbitrates between the two arguments.
  if keep_prob is None:
    rate_from_keep_prob = None
  else:
    try:
      rate_from_keep_prob = 1. - keep_prob
    except TypeError:
      raise ValueError("`keep_prob` must be a floating point number or Tensor. "
                       f"Received: keep_prob={keep_prob}")

  rate = deprecation.deprecated_argument_lookup(
      "rate", rate, "keep_prob", rate_from_keep_prob)

  if rate is None:
    raise ValueError(f"`rate` must be provided. Received: rate={rate}")

  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
  """Computes dropout: randomly sets elements to zero to prevent overfitting.

  Warning: Consider `tf.nn.experimental.stateless_dropout` instead of this
  function.  The difference is analogous to the difference between
  `tf.random.stateless_uniform` and `tf.random.uniform`; see the [Random
  number generation](https://www.tensorflow.org/guide/random_numbers) guide.
  Legacy stateful RNG ops such as `tf.random.uniform` and `tf.nn.dropout`
  are not deprecated yet but highly discouraged, because their states are
  hard to control.

  Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
  When converting 1.x code, please use named arguments to ensure behavior
  stays consistent.

  See also: `tf.keras.layers.Dropout` for a dropout layer.

  [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
  models.  With probability `rate`, elements of `x` are set to `0`; the
  remaining elements are scaled up by `1.0 / (1 - rate)` so that the
  expected value is preserved.

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,5])
  >>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
  array([[2., 0., 0., 2., 2.],
       [2., 2., 2., 2., 2.],
       [2., 0., 2., 0., 2.]], dtype=float32)

  By default, each element is kept or dropped independently.  If
  `noise_shape` is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with
  `noise_shape[i] == shape(x)[i]` make independent decisions.  This is
  useful for dropping whole channels from an image or sequence.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x.  The probability that
      each element is dropped.  For example, setting rate=0.1 would drop 10%
      of input elements.
    noise_shape: A 1-D integer `Tensor`, representing the shape for randomly
      generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.  `rate=1` is disallowed, because the output would be all
      zeros, which is likely not what was intended.
  """
  sampler = functools.partial(random_ops.random_uniform, seed=seed)

  def dummy_rng_step():
    # Consume a seed even on the rate==0 fast path so the stateful RNG
    # sequence does not depend on whether the fast path is taken.
    random_seed.get_seed(seed)

  return _dropout(
      x=x,
      rate=rate,
      noise_shape=noise_shape,
      uniform_sampler=sampler,
      dummy_rng_step=dummy_rng_step,
      name=name,
      default_name="dropout")
@tf_export("nn.experimental.stateless_dropout")
@dispatch.add_dispatch_support
def stateless_dropout(x, rate, seed, rng_alg=None, noise_shape=None, name=None):
  """Computes dropout: randomly sets elements to zero to prevent overfitting.

  [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
  models.  With probability `rate`, elements of `x` are set to `0`; the
  remaining elements are scaled up by `1.0 / (1 - rate)` so that the
  expected value is preserved.

  This function is a stateless version of `tf.nn.dropout`: no matter how
  many times you call it, the same `seed` leads to the same result and a
  different `seed` leads to a different result.  The difference between
  this function and `tf.nn.dropout` is analogous to the difference between
  `tf.random.stateless_uniform` and `tf.random.uniform`; see the [Random
  number generation](https://www.tensorflow.org/guide/random_numbers) guide.
  Legacy stateful RNG ops such as `tf.random.uniform` and `tf.nn.dropout`
  are not deprecated yet but highly discouraged, because their states are
  hard to control.

  By default, each element is kept or dropped independently.  If
  `noise_shape` is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with
  `noise_shape[i] == shape(x)[i]` make independent decisions.  This is
  useful for dropping whole channels from an image or sequence.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x.  The probability that
      each element is dropped.  For example, setting rate=0.1 would drop 10%
      of input elements.
    seed: An integer tensor of shape `[2]`.  The seed of the random numbers.
    rng_alg: The algorithm used to generate the random numbers (default to
      `"auto_select"`).  See the `alg` argument of
      `tf.random.stateless_uniform` for the supported values.
    noise_shape: A 1-D integer `Tensor`, representing the shape for randomly
      generated keep/drop flags.
    name: A name for this operation.

  Returns:
    A Tensor of the same shape and dtype of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.  `rate=1` is disallowed, because the output would be all
      zeros, which is likely not what was intended.
  """
  sampler = functools.partial(
      stateless_random_ops.stateless_random_uniform, seed=seed, alg=rng_alg)

  def dummy_rng_step():
    # Stateless sampling has no RNG state to advance on the fast path.
    pass

  return _dropout(
      x=x,
      rate=rate,
      noise_shape=noise_shape,
      uniform_sampler=sampler,
      dummy_rng_step=dummy_rng_step,
      name=name,
      default_name="stateless_dropout")
@tf_export("nn.experimental.general_dropout")
@dispatch.add_dispatch_support
def general_dropout(x, rate, uniform_sampler, noise_shape=None, name=None):
  """Computes dropout: randomly sets elements to zero to prevent overfitting.

  Please see `tf.nn.experimental.stateless_dropout` for an overview of
  dropout.

  Unlike `tf.nn.experimental.stateless_dropout`, here you can supply a
  custom sampler function `uniform_sampler` that (given a shape and a dtype)
  generates a random, `Uniform[0, 1)`-distributed tensor (of that shape and
  dtype).  `uniform_sampler` can be e.g.
  `tf.random.stateless_random_uniform` or `tf.random.Generator.uniform`.
  Supplying a `tf.random.Generator`'s `uniform` method directly avoids the
  extra cost of generating and consuming seeds that
  `tf.random.Generator.make_seeds` with
  `tf.nn.experimental.stateless_dropout` would incur.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x.  The probability that
      each element is dropped.  For example, setting rate=0.1 would drop 10%
      of input elements.
    uniform_sampler: a callable of signature `(shape, dtype) ->
      Tensor[shape, dtype]`, used to generate a tensor of
      uniformly-distributed random numbers in the range `[0, 1)`, of the
      given shape and dtype.
    noise_shape: A 1-D integer `Tensor`, representing the shape for randomly
      generated keep/drop flags.
    name: A name for this operation.

  Returns:
    A Tensor of the same shape and dtype of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.  `rate=1` is disallowed, because the output would be all
      zeros, which is likely not what was intended.
  """
  def dummy_rng_step():
    # The caller owns the sampler, so there is no RNG state for us to
    # advance on the fast path.
    pass

  return _dropout(
      x=x,
      rate=rate,
      noise_shape=noise_shape,
      uniform_sampler=uniform_sampler,
      dummy_rng_step=dummy_rng_step,
      name=name,
      default_name="general_dropout")
def _dropout(x, rate, noise_shape, uniform_sampler, dummy_rng_step, name,
             default_name):
  """Shared implementation of the various dropout functions.

  Args:
    x: same as the namesake in `dropout_v2`.
    rate: same as the namesake in `dropout_v2`.
    noise_shape: same as the namesake in `dropout_v2`.
    uniform_sampler: a callable of signature `(shape, dtype) ->
      Tensor`, used to generate a tensor of uniformly-distributed
      random numbers in the range `[0, 1)`, of the given shape and dtype.
    dummy_rng_step: a callable of signature `() -> None`, to make a
      dummy RNG call in the fast path. In the fast path where rate is
      0, we don't need to generate random numbers, but some samplers
      still require you to make an RNG call, to make sure that RNG
      states won't depend on whether the fast path is taken.
    name: same as the namesake in `dropout_v2`.
    default_name: a default name in case `name` is `None`.

  Returns:
    A Tensor of the same shape and dtype of `x`.
  """
  with ops.name_scope(name, default_name, [x]) as name:
    # A Python-number rate can be range-checked eagerly; a tensor rate is
    # validated later (rank check below, value check by the ops themselves).
    is_rate_number = isinstance(rate, numbers.Real)
    if is_rate_number and (rate < 0 or rate >= 1):
      raise ValueError("`rate` must be a scalar tensor or a float in the "
                       f"range [0, 1). Received: rate={rate}")
    x = ops.convert_to_tensor(x, name="x")
    x_dtype = x.dtype
    if not x_dtype.is_floating:
      raise ValueError(
          "`x.dtype` must be a floating point tensor as `x` will be "
          f"scaled. Received: x_dtype={x_dtype}")
    if is_rate_number and rate == 0:
      # Fast-path: Return the input immediately if rate is non-tensor & is `0`.
      # We trigger this after all error checking
      # and after `x` has been converted to a tensor, to prevent inconsistent
      # tensor conversions/error raising if rate is changed to/from 0.
      #
      # We also explicitly call `dummy_rng_step` to make sure
      # we don't change the random number generation behavior of
      # stateful random ops by entering a fastpath,
      # despite not generating a random tensor in the fastpath
      dummy_rng_step()
      return x

    is_executing_eagerly = context.executing_eagerly()
    if not tensor_util.is_tf_type(rate):
      if is_rate_number:
        # Pre-scale x by 1/(1-rate); elements that are dropped get zeroed
        # afterwards by the where_v2 below.
        keep_prob = 1 - rate
        scale = 1 / keep_prob
        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
        ret = gen_math_ops.mul(x, scale)
      else:
        raise ValueError(
            f"`rate` must be a scalar or scalar tensor. Received: rate={rate}")
    else:
      # Tensor rate: must be scalar, and its dtype must match (or be
      # castable to) x's dtype before computing x / (1 - rate).
      rate.get_shape().assert_has_rank(0)
      rate_dtype = rate.dtype
      if rate_dtype != x_dtype:
        if not rate_dtype.is_compatible_with(x_dtype):
          raise ValueError(
              "`x.dtype` must be compatible with `rate.dtype`. "
              f"Received: x.dtype={x_dtype} and rate.dtype={rate_dtype}")
        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
      one_tensor = constant_op.constant(1, dtype=x_dtype)
      ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))

    noise_shape = _get_noise_shape(x, noise_shape)
    # Sample a uniform distribution on [0.0, 1.0) and select values larger
    # than or equal to `rate`.
    random_tensor = uniform_sampler(shape=noise_shape, dtype=x_dtype)
    keep_mask = random_tensor >= rate
    zero_tensor = constant_op.constant(0, dtype=x_dtype)
    ret = array_ops.where_v2(keep_mask, ret, zero_tensor)
    if not is_executing_eagerly:
      # In graph mode, restore the static shape info that where_v2 with a
      # broadcast mask may have lost.
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
@dispatch.add_dispatch_support
def top_k(input, k=1, sorted=True, index_type=dtypes.int32, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  For a vector (rank-1) input, finds the `k` largest entries and outputs
  their values and indices as vectors; `values[j]` is the `j`-th largest
  entry in `input` and its index is `indices[j]`.

  >>> result = tf.math.top_k([1, 2, 98, 1, 1, 99, 3, 1, 3, 96, 4, 1],
  ...                        k=3)
  >>> result.values.numpy()
  array([99, 98, 96], dtype=int32)
  >>> result.indices.numpy()
  array([5, 2, 9], dtype=int32)

  For matrices (resp. higher rank input), computes the top `k` entries in
  each row (resp. vector along the last dimension), so
  `values.shape == indices.shape == input.shape[:-1] + [k]`.  The indices
  can be used to `gather` from a tensor whose shape matches `input`:

  >>> input = tf.random.normal(shape=(3,4,5,6))
  >>> values, indices = tf.math.top_k(input, k=2)
  >>> gathered_values = tf.gather(input, indices, batch_dims=-1)
  >>> assert tf.reduce_all(gathered_values == values)

  If two elements are equal, the lower-index element appears first.

  >>> result = tf.math.top_k([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
  ...                        k=3)
  >>> result.indices.numpy()
  array([0, 1, 3], dtype=int32)

  By default, indices are returned as type `int32`; this can be changed by
  specifying `index_type`.

  >>> result = tf.math.top_k([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
  ...                        k=3, index_type=tf.int16)
  >>> result.indices.numpy()
  array([0, 1, 3], dtype=int16)

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `Tensor` of type `int16`, `int32` or `int64`.  Number of top
      elements to look for along the last dimension (along each row for
      matrices).
    sorted: If true the resulting `k` elements will be sorted by the values
      in descending order.
    index_type: Optional dtype for output indices.
    name: Optional name for the operation.

  Returns:
    A tuple with two named fields:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # Thin wrapper: the generated TopKV2 kernel implements the selection.
  return gen_nn_ops.top_kv2(
      input,
      k=k,
      sorted=sorted,
      index_type=index_type,
      name=name)
@tf_export("math.approx_max_k", "nn.approx_max_k")
@dispatch.add_dispatch_support
def approx_max_k(operand,
                 k,
                 reduction_dimension=-1,
                 recall_target=0.95,
                 reduction_input_size_override=-1,
                 aggregate_to_topk=True,
                 name=None):
  """Returns max `k` values and their indices of the input `operand` in an approximate manner.

  See https://arxiv.org/abs/2206.14286 for the algorithm details. This op is
  only optimized on TPU currently.

  Args:
    operand : Array to search for max-k. Must be a floating number type.
    k : Specifies the number of max-k.
    reduction_dimension : Integer dimension along which to search. Default: -1.
    recall_target : Recall target for the approximation.
    reduction_input_size_override : When set to a positive value, it overrides
      the size determined by `operand[reduction_dim]` for evaluating the
      recall. This option is useful when the given `operand` is only a subset
      of the overall computation in SPMD or distributed pipelines, where the
      true input size cannot be deferred by the `operand` shape.
    aggregate_to_topk : When true, aggregates approximate results to top-k.
      When false, returns the approximate results. The number of the
      approximate results is implementation defined and is greater equals to
      the specified `k`.
    name: Optional name for the operation.

  Returns:
    Tuple of two arrays. The arrays are the max `k` values and the
    corresponding indices along the `reduction_dimension` of the input
    `operand`. The arrays' dimensions are the same as the input `operand`
    except for the `reduction_dimension`: when `aggregate_to_topk` is true,
    the reduction dimension is `k`; otherwise, it is greater equals to `k`
    where the size is implementation-defined.

  We encourage users to wrap `approx_max_k` with jit; see the jit-compiled
  maximal-inner-product-search example in the module documentation.
  """
  # Delegate to the shared ApproxTopK kernel in max mode.
  return gen_nn_ops.approx_top_k(
      operand,
      k=k,
      reduction_dimension=reduction_dimension,
      recall_target=recall_target,
      is_max_k=True,
      reduction_input_size_override=reduction_input_size_override,
      aggregate_to_topk=aggregate_to_topk,
      name=name)
@tf_export("math.approx_min_k", "nn.approx_min_k")
@dispatch.add_dispatch_support
def approx_min_k(operand,
                 k,
                 reduction_dimension=-1,
                 recall_target=0.95,
                 reduction_input_size_override=-1,
                 aggregate_to_topk=True,
                 name=None):
  """Returns min `k` values and their indices of the input `operand` in an approximate manner.

  See https://arxiv.org/abs/2206.14286 for the algorithm details. This op is
  only optimized on TPU currently.

  Args:
    operand : Array to search for min-k. Must be a floating number type.
    k : Specifies the number of min-k.
    reduction_dimension: Integer dimension along which to search. Default: -1.
    recall_target: Recall target for the approximation.
    reduction_input_size_override : When set to a positive value, it overrides
      the size determined by `operand[reduction_dim]` for evaluating the
      recall. This option is useful when the given `operand` is only a subset
      of the overall computation in SPMD or distributed pipelines, where the
      true input size cannot be deferred by the `operand` shape.
    aggregate_to_topk: When true, aggregates approximate results to top-k.
      When false, returns the approximate results. The number of the
      approximate results is implementation defined and is greater equals to
      the specified `k`.
    name: Optional name for the operation.

  Returns:
    Tuple of two arrays. The arrays are the least `k` values and the
    corresponding indices along the `reduction_dimension` of the input
    `operand`. The arrays' dimensions are the same as the input `operand`
    except for the `reduction_dimension`: when `aggregate_to_topk` is true,
    the reduction dimension is `k`; otherwise, it is greater equals to `k`
    where the size is implementation-defined.

  We encourage users to wrap `approx_min_k` with jit, e.g. for nearest
  neighbor search over the squared l2 distance; computing
  `db_norms/2 - dot(qy, db^T)` instead of `qy^2 - 2 dot(qy, db^T) + db^2`
  uses less arithmetics and produces the same set of neighbors.
  """
  # Delegate to the shared ApproxTopK kernel in min mode.
  return gen_nn_ops.approx_top_k(
      operand,
      k=k,
      reduction_dimension=reduction_dimension,
      recall_target=recall_target,
      is_max_k=False,
      reduction_input_size_override=reduction_input_size_override,
      aggregate_to_topk=aggregate_to_topk,
      name=name)
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th smallest value for the last dimension.

  Note that n is zero-indexed.

  For a vector (rank-1) input, finds the entry that is the nth-smallest
  value in the vector and outputs it as a scalar tensor.  For matrices
  (resp. higher rank input), computes the nth-smallest value in each row
  (resp. vector along the last dimension), so

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension
      (along each row for matrices). Valid range of n is
      `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  # Thin wrapper over the generated NthElement kernel.
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  This is a deprecated version of `fractional_max_pool`.

  Fractional max pooling is slightly different than regular max pooling. In
  regular max pooling, you downsize an input set by taking the maximum value of
  smaller N x N subsections of the set (often 2x2), and try to reduce the set by
  a factor of N, where N is an integer. Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.

  The sizes of the pooling regions are generated randomly but are fairly
  uniform. For example, let's look at the height dimension, and the constraints
  on the list of rows that will be pool boundaries.

  First we define the following:

  1. input_row_length : the number of rows from the input set
  2. output_row_length : which will be smaller than the input
  3. alpha = input_row_length / output_row_length : our reduction ratio
  4. K = floor(alpha)
  5. row_pooling_sequence : this is the result list of pool boundary rows

  Then, row_pooling_sequence should satisfy:

  1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4. length(row_pooling_sequence) = output_row_length+1

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
      each dimension of `value`, currently only supports row and col dimension
      and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
      1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
      allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
      ratio on height and width dimensions respectively.
    pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check (Graham, 2015) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice. The result would be [20, 16] for fractional max pooling.
    deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
      instead.
    seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
      random number generator is seeded by the given seed. Otherwise it is
      seeded by a random seed.
    seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling. Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  Raises:
    ValueError: If op determinism is enabled and either the seeds are not set or
      the "deterministic" argument is False.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Determinism requires fixed seeds and the deterministic pooling-boundary
  # algorithm; otherwise the op would produce run-to-run different outputs.
  if config.is_op_determinism_enabled() and (not seed or not seed2 or
                                             not deterministic):
    # Fixed: the original message concatenated "...by passing" directly onto
    # '"seed=1, ...' with no separating space.
    raise ValueError(
        'tf.compat.v1.nn.fractional_max_pool requires "seed" and '
        '"seed2" to be non-zero and "deterministic" to be true when op '
        "determinism is enabled. Please pass in such values, e.g. by passing "
        f'"seed=1, seed2=1, deterministic=True". Got: seed={seed}, '
        f'seed2={seed2}, deterministic={deterministic}')
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name)
Fractional max pooling, as you might + expect from the word "fractional", means that the overall reduction ratio N + does not have to be an integer. + + The sizes of the pooling regions are generated randomly but are fairly + uniform. For example, let's look at the height dimension, and the constraints + on the list of rows that will be pool boundaries. + + First we define the following: + + 1. input_row_length : the number of rows from the input set + 2. output_row_length : which will be smaller than the input + 3. alpha = input_row_length / output_row_length : our reduction ratio + 4. K = floor(alpha) + 5. row_pooling_sequence : this is the result list of pool boundary rows + + Then, row_pooling_sequence should satisfy: + + 1. a[0] = 0 : the first value of the sequence is 0 + 2. a[end] = input_row_length : the last value of the sequence is the size + 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + 4. length(row_pooling_sequence) = output_row_length+1 + + Args: + value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. + pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`. + Pooling ratio for each dimension of `value`, currently only supports row + and col dimension and should be >= 1.0. For example, a valid pooling ratio + looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 + because we don't allow pooling on batch and channels dimensions. 1.44 and + 1.73 are pooling ratio on height and width dimensions respectively. + pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, + generates the pooling sequence in a pseudorandom fashion, otherwise, in a + random fashion. Check paper (Graham, 2015) for difference between + pseudorandom and random. + overlapping: An optional `bool`. Defaults to `False`. When set to `True`, + it means when pooling, the values at the boundary of adjacent pooling + cells are used by both cells. 
For example: + `index 0 1 2 3 4` + `value 20 5 16 3 7` + If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used + twice. The result would be [20, 16] for fractional max pooling. + seed: An optional `int`. Defaults to `0`. If set to be non-zero, the + random number generator is seeded by the given seed. Otherwise it is + seeded by a random seed. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, + `col_pooling_sequence`). + output: Output `Tensor` after fractional max pooling. Has the same type as + `value`. + row_pooling_sequence: A `Tensor` of type `int64`. + col_pooling_sequence: A `Tensor` of type `int64`. + + Raises: + ValueError: If no seed is specified and op determinism is enabled. + + References: + Fractional Max-Pooling: + [Graham, 2015](https://arxiv.org/abs/1412.6071) + ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) + """ + if (isinstance(pooling_ratio, (list, tuple))): + if (pooling_ratio[0] != 1.0 or pooling_ratio[-1] != 1.0): + raise ValueError( + "`pooling_ratio` should have first and last elements with value 1.0. " + f"Received: pooling_ratio={pooling_ratio}") + for element in pooling_ratio: + if element < 1.0: + raise ValueError( + f"`pooling_ratio` elements should be >= 1.0. " + f"Received: pooling_ratio={pooling_ratio}") + elif (isinstance(pooling_ratio, (int, float))): + if pooling_ratio < 1.0: + raise ValueError( + "`pooling_ratio` should be >= 1.0. " + f"Received: pooling_ratio={pooling_ratio}") + else: + raise ValueError( + "`pooling_ratio` should be an int or a list of ints. " + f"Received: pooling_ratio={pooling_ratio}") + + pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio") + + if seed == 0: + if config.is_op_determinism_enabled(): + raise ValueError( + f"tf.nn.fractional_max_pool requires a non-zero seed to be passed in " + f"when determinism is enabled, but got seed={seed}. Please pass in a " + f'non-zero seed, e.g. 
by passing "seed=1".') + return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, + overlapping, deterministic=False, + seed=0, seed2=0, name=name) + else: + seed1, seed2 = random_seed.get_seed(seed) + return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, + overlapping, deterministic=True, + seed=seed1, seed2=seed2, name=name) + + +@tf_export(v1=["nn.fractional_avg_pool"]) +@dispatch.add_dispatch_support +@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` " + "args are deprecated. Use fractional_avg_pool_v2.") +def fractional_avg_pool(value, + pooling_ratio, + pseudo_random=False, + overlapping=False, + deterministic=False, + seed=0, + seed2=0, + name=None): # pylint: disable=redefined-builtin + r"""Performs fractional average pooling on the input. + + This is a deprecated version of `fractional_avg_pool`. + + Fractional average pooling is similar to Fractional max pooling in the pooling + region generation step. The only difference is that after pooling regions are + generated, a mean operation is performed instead of a max operation in each + pooling region. + + Args: + value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. + pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for + each dimension of `value`, currently only supports row and col dimension + and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, + 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't + allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling + ratio on height and width dimensions respectively. + pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, + generates the pooling sequence in a pseudorandom fashion, otherwise, in a + random fashion. Check paper (Graham, 2015) for difference between + pseudorandom and random. + overlapping: An optional `bool`. Defaults to `False`. 
When set to `True`, + it means when pooling, the values at the boundary of adjacent pooling + cells are used by both cells. For example: + `index 0 1 2 3 4` + `value 20 5 16 3 7` + If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used + twice. The result would be [20, 16] for fractional avg pooling. + deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2` + instead. + seed: An optional `int`. Defaults to `0`. If set to be non-zero, the + random number generator is seeded by the given seed. Otherwise it is + seeded by a random seed. + seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, + `col_pooling_sequence`). + output: Output `Tensor` after fractional avg pooling. Has the same type as + `value`. + row_pooling_sequence: A `Tensor` of type `int64`. + col_pooling_sequence: A `Tensor` of type `int64`. + + References: + Fractional Max-Pooling: + [Graham, 2015](https://arxiv.org/abs/1412.6071) + ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) + """ + return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, + overlapping, deterministic, seed, seed2, + name=name) + + +@tf_export("nn.fractional_avg_pool", v1=[]) +@dispatch.add_dispatch_support +def fractional_avg_pool_v2(value, + pooling_ratio, + pseudo_random=False, + overlapping=False, + seed=0, + name=None): # pylint: disable=redefined-builtin + r"""Performs fractional average pooling on the input. + + Fractional average pooling is similar to Fractional max pooling in the pooling + region generation step. The only difference is that after pooling regions are + generated, a mean operation is performed instead of a max operation in each + pooling region. + + Args: + value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. + pooling_ratio: A list of `floats` that has length >= 4. 
Pooling ratio for + each dimension of `value`, currently only supports row and col dimension + and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, + 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't + allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling + ratio on height and width dimensions respectively. + pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, + generates the pooling sequence in a pseudorandom fashion, otherwise, in a + random fashion. Check paper (Graham, 2015) for difference between + pseudorandom and random. + overlapping: An optional `bool`. Defaults to `False`. When set to `True`, + it means when pooling, the values at the boundary of adjacent pooling + cells are used by both cells. For example: + `index 0 1 2 3 4` + `value 20 5 16 3 7` + If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used + twice. The result would be [20, 16] for fractional avg pooling. + seed: An optional `int`. Defaults to `0`. If set to be non-zero, the + random number generator is seeded by the given seed. Otherwise it is + seeded by a random seed. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, + `col_pooling_sequence`). + output: Output `Tensor` after fractional avg pooling. Has the same type as + `value`. + row_pooling_sequence: A `Tensor` of type `int64`. + col_pooling_sequence: A `Tensor` of type `int64`. 
+ + References: + Fractional Max-Pooling: + [Graham, 2015](https://arxiv.org/abs/1412.6071) + ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) + """ + if seed == 0: + return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, + overlapping, deterministic=False, + seed=0, seed2=0, name=name) + else: + seed1, seed2 = random_seed.get_seed(seed) + return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, + overlapping, deterministic=True, + seed=seed1, seed2=seed2, name=name) + + +@ops.RegisterStatistics("Dilation2D", "flops") +def _calc_dilation2d_flops(graph, node): + """Calculates the compute resources needed for Dilation2D.""" + input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) + input_shape.assert_is_fully_defined() + filter_shape = graph_util.tensor_shape_from_node_def_name( + graph, node.input[1]) + filter_shape.assert_is_fully_defined() + output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) + output_shape.assert_is_fully_defined() + filter_height = int(filter_shape[0]) + filter_width = int(filter_shape[1]) + output_count = np.prod(output_shape.as_list(), dtype=np.int64) + return ops.OpStats("flops", (output_count * filter_height * filter_width * 2)) + + +@tf_export(v1=["nn.erosion2d"]) +@dispatch.add_dispatch_support +def erosion2d(value, kernel, strides, rates, padding, name=None): + """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors. + + The `value` tensor has shape `[batch, in_height, in_width, depth]` and the + `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e., + each input channel is processed independently of the others with its own + structuring function. The `output` tensor has shape + `[batch, out_height, out_width, depth]`. The spatial dimensions of the + output tensor depend on the `padding` algorithm. We currently only support the + default "NHWC" `data_format`. 
+ + In detail, the grayscale morphological 2-D erosion is given by: + + output[b, y, x, c] = + min_{dy, dx} value[b, + strides[1] * y - rates[1] * dy, + strides[2] * x - rates[2] * dx, + c] - + kernel[dy, dx, c] + + Duality: The erosion of `value` by the `kernel` is equal to the negation of + the dilation of `-value` by the reflected `kernel`. + + Args: + value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. + kernel: A `Tensor`. Must have the same type as `value`. + 3-D with shape `[kernel_height, kernel_width, depth]`. + strides: A list of `ints` that has length `>= 4`. + 1-D of length 4. The stride of the sliding window for each dimension of + the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + rates: A list of `ints` that has length `>= 4`. + 1-D of length 4. The input stride for atrous morphological dilation. + Must be: `[1, rate_height, rate_width, 1]`. + padding: A `string` from: `"SAME", "VALID"`. + The type of padding algorithm to use. + name: A name for the operation (optional). If not specified "erosion2d" + is used. + + Returns: + A `Tensor`. Has the same type as `value`. + 4-D with shape `[batch, out_height, out_width, depth]`. + Raises: + ValueError: If the `value` depth does not match `kernel`' shape, or if + padding is other than `'VALID'` or `'SAME'`. + """ + with ops.name_scope(name, "erosion2d", [value, kernel]) as name: + # Reduce erosion to dilation by duality. + return math_ops.negative( + gen_nn_ops.dilation2d( + input=math_ops.negative(value), + filter=array_ops.reverse_v2(kernel, [0, 1]), + strides=strides, + rates=rates, + padding=padding, + name=name)) + + +@tf_export("nn.erosion2d", v1=[]) +@dispatch.add_dispatch_support +def erosion2d_v2(value, + filters, + strides, + padding, + data_format, + dilations, + name=None): + """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors. 
+ + The `value` tensor has shape `[batch, in_height, in_width, depth]` and the + `filters` tensor has shape `[filters_height, filters_width, depth]`, i.e., + each input channel is processed independently of the others with its own + structuring function. The `output` tensor has shape + `[batch, out_height, out_width, depth]`. The spatial dimensions of the + output tensor depend on the `padding` algorithm. We currently only support the + default "NHWC" `data_format`. + + In detail, the grayscale morphological 2-D erosion is given by: + + output[b, y, x, c] = + min_{dy, dx} value[b, + strides[1] * y - dilations[1] * dy, + strides[2] * x - dilations[2] * dx, + c] - + filters[dy, dx, c] + + Duality: The erosion of `value` by the `filters` is equal to the negation of + the dilation of `-value` by the reflected `filters`. + + Args: + value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. + filters: A `Tensor`. Must have the same type as `value`. + 3-D with shape `[filters_height, filters_width, depth]`. + strides: A list of `ints` that has length `>= 4`. + 1-D of length 4. The stride of the sliding window for each dimension of + the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + padding: A `string` from: `"SAME", "VALID"`. + The type of padding algorithm to use. See + [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) + for more information. + data_format: A `string`, only `"NHWC"` is currently supported. + dilations: A list of `ints` that has length `>= 4`. + 1-D of length 4. The input stride for atrous morphological dilation. + Must be: `[1, rate_height, rate_width, 1]`. + name: A name for the operation (optional). If not specified "erosion2d" + is used. + + Returns: + A `Tensor`. Has the same type as `value`. + 4-D with shape `[batch, out_height, out_width, depth]`. + + Raises: + ValueError: If the `value` depth does not match `filters`' shape, or if + padding is other than `'VALID'` or `'SAME'`. 
+ """ + if data_format != "NHWC": + raise ValueError("`data_format` values other than 'NHWC' are not " + f"supported. Received: data_format={data_format}") + + with ops.name_scope(name, "erosion2d", [value, filters]) as name: + # Reduce erosion to dilation by duality. + return math_ops.negative( + gen_nn_ops.dilation2d( + input=math_ops.negative(value), + filter=array_ops.reverse_v2(filters, [0, 1]), + strides=strides, + rates=dilations, + padding=padding, + name=name)) + + +@tf_export(v1=["math.in_top_k", "nn.in_top_k"]) +@dispatch.add_dispatch_support +def in_top_k(predictions, targets, k, name=None): + r"""Says whether the targets are in the top `K` predictions. + + This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + prediction for the target class is finite (not inf, -inf, or nan) and among + the top `k` predictions among all predictions for example `i`. Note that the + behavior of `InTopK` differs from the `TopK` op in its handling of ties; if + multiple classes have the same prediction value and straddle the top-`k` + boundary, all of those classes are considered to be in the top `k`. + + More formally, let + + \\(predictions_i\\) be the predictions for all classes for example `i`, + \\(targets_i\\) be the target class for example `i`, + \\(out_i\\) be the output for example `i`, + + $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + + Args: + predictions: A `Tensor` of type `float32`. + A `batch_size` x `classes` tensor. + targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A `batch_size` vector of class ids. + k: An `int`. Number of top elements to look at for computing precision. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`. 
+ """ + with ops.name_scope(name, "in_top_k"): + return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name) + + +@tf_export("math.in_top_k", "nn.in_top_k", v1=[]) +@dispatch.add_dispatch_support +def in_top_k_v2(targets, predictions, k, name=None): + """Outputs whether the targets are in the top `K` predictions. + + This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + prediction for the target class is finite (not inf, -inf, or nan) and among + the top `k` predictions among all predictions for example `i`. + `predictions` does not have to be normalized. + + Note that the behavior of `InTopK` differs from the `TopK` op in its handling + of ties; if multiple classes have the same prediction value and straddle the + top-`k` boundary, all of those classes are considered to be in the top `k`. + + >>> target = tf.constant([0, 1, 3]) + >>> pred = tf.constant([ + ... [1.2, -0.3, 2.8, 5.2], + ... [0.1, 0.0, 0.0, 0.0], + ... [0.0, 0.5, 0.3, 0.3]], + ... dtype=tf.float32) + >>> print(tf.math.in_top_k(target, pred, 2)) + tf.Tensor([False True True], shape=(3,), dtype=bool) + + Args: + targets: A `batch_size` vector of class ids. Must be `int32` or `int64`. + predictions: A `batch_size` x `classes` tensor of type `float32`. + k: An `int`. The parameter to specify search space. + name: A name for the operation (optional). + + Returns: + A `Tensor` with the same shape of `targets` with type of `bool`. Each + element specifies if the target falls into top-k predictions. 
+ """ + return in_top_k(predictions, targets, k, name) + + +quantized_avg_pool = tf_export(v1=["nn.quantized_avg_pool"])( + dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool) +) +quantized_conv2d = tf_export(v1=["nn.quantized_conv2d"])( + dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d) +) +quantized_relu_x = tf_export(v1=["nn.quantized_relu_x"])( + dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x) +) +quantized_max_pool = tf_export(v1=["nn.quantized_max_pool"])( + dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool) +) + + +@tf_export("nn.isotonic_regression", v1=[]) +@dispatch.add_dispatch_support +def isotonic_regression(inputs, decreasing=True, axis=-1): + r"""Solves isotonic regression problems along the given axis. + + For each vector x, the problem solved is + + $$\argmin_{y_1 >= y_2 >= ... >= y_n} \sum_i (x_i - y_i)^2.$$ + + As the solution is component-wise constant, a second tensor is returned that + encodes the segments. The problems are solved over the given axis. + + Consider the following example, where we solve a batch of two problems. The + first input is [3, 1, 2], while the second [1, 3, 4] (as the axis is 1). + >>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32) + >>> y, segments = tf.nn.isotonic_regression(x, axis=1) + >>> y # The solution. + + + Note that the first solution has two blocks [2] and [1.5, 1.5]. The second + solution is constant, and thus has a single segment. These segments are + exactly what the second returned tensor encodes: + + >>> segments + + + + Args: + inputs: A tensor holding the inputs. + decreasing: If set to False, the inequalities in the optimizing constrained + are flipped. + axis: The axis along which the problems should be solved. + + Returns: + output: The solutions, same shape as type as the input. + segments: An int32 tensor, same shape as the input indicating the segments + that have the same value. 
Specifically, those positions that have the same + value correspond to the same segment. These values start at zero, and are + monotonously increasing for each solution. + """ + type_promotions = { + # Float types get mapped to themselves, int8/16 to float32, rest to double + dtypes.float32: + dtypes.float32, + dtypes.half: + dtypes.half, + dtypes.bfloat16: + dtypes.bfloat16, + dtypes.int8: + dtypes.float32, + dtypes.int16: + dtypes.float32, + } + inputs = ops.convert_to_tensor(inputs) + try: + output_dtype = type_promotions[inputs.dtype] + except KeyError: + output_dtype = dtypes.float64 + + def compute_on_matrix(matrix, name=None): + iso_fn = functools.partial( + gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name) + if decreasing: + return iso_fn(matrix) + else: + output, segments = iso_fn(-matrix) + return -output, segments + + return _wrap_2d_function(inputs, compute_on_matrix, axis) + + +# Register elementwise ops that don't have Python wrappers. +# Unary elementwise ops. +dispatch.register_unary_elementwise_api(gen_nn_ops.elu) +dispatch.register_unary_elementwise_api(gen_nn_ops.relu) +dispatch.register_unary_elementwise_api(gen_nn_ops.selu) +dispatch.register_unary_elementwise_api(gen_nn_ops.softsign) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/proto_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/proto_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9517795e74ae7d6f03e065656f6210fa9dac91ad --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/proto_ops.py @@ -0,0 +1,25 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +# pylint: disable=wildcard-import,unused-import +"""Protocol Buffer encoding and decoding from tensors.""" +# pylint: disable=unused-import +from tensorflow.python.framework import ops +from tensorflow.python.ops.gen_decode_proto_ops import decode_proto_v2 as decode_proto +from tensorflow.python.ops.gen_encode_proto_ops import encode_proto +# pylint: enable=unused-import + +ops.NotDifferentiable("DecodeProtoV2") +ops.NotDifferentiable("EncodeProto") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/random_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..366b2e631ac2e3eb332188fc1ddb76d71a991b5e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/random_ops.py @@ -0,0 +1,629 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operations for generating random numbers.""" + +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_random_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import shape_util + +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_random_ops import * +# pylint: enable=wildcard-import + +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("random.normal", v1=["random.normal", "random_normal"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("random_normal") +def random_normal(shape, + mean=0.0, + stddev=1.0, + dtype=dtypes.float32, + seed=None, + name=None): + """Outputs random values from a normal distribution. + + Example that generates a new set of random values every time: + + >>> tf.random.set_seed(5); + >>> tf.random.normal([4], 0, 1, tf.float32) + + + Example that outputs a reproducible result: + + >>> tf.random.set_seed(5); + >>> tf.random.normal([2,2], 0, 1, tf.float32, seed=1) + + + In this case, we are setting both the global and operation-level seed to + ensure this result is reproducible. See `tf.random.set_seed` for more + information. + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + mean: A Tensor or Python value of type `dtype`, broadcastable with `stddev`. + The mean of the normal distribution. + stddev: A Tensor or Python value of type `dtype`, broadcastable with `mean`. + The standard deviation of the normal distribution. 
+ dtype: The float type of the output: `float16`, `bfloat16`, `float32`, + `float64`. Defaults to `float32`. + seed: A Python integer. Used to create a random seed for the distribution. + See + `tf.random.set_seed` + for behavior. + name: A name for the operation (optional). + + Returns: + A tensor of the specified shape filled with random normal values. + """ + with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name: + shape_tensor = shape_util.shape_tensor(shape) + mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean") + stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") + seed1, seed2 = random_seed.get_seed(seed) + rnd = gen_random_ops.random_standard_normal( + shape_tensor, dtype, seed=seed1, seed2=seed2) + mul = rnd * stddev_tensor + value = math_ops.add(mul, mean_tensor, name=name) + shape_util.maybe_set_static_shape(value, shape) + return value + + +ops.NotDifferentiable("RandomStandardNormal") + + +def parameterized_truncated_normal(shape, + means=0.0, + stddevs=1.0, + minvals=-2.0, + maxvals=2.0, + dtype=dtypes.float32, + seed=None, + name=None): + """Outputs random values from a truncated normal distribution. + + The generated values follow a normal distribution with specified mean and + standard deviation, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + means: A 0-D Tensor or Python value of type `dtype`. The mean of the + truncated normal distribution. + stddevs: A 0-D Tensor or Python value of type `dtype`. The standard + deviation of the truncated normal distribution. + minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of + the truncated normal distribution. + maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of + the truncated normal distribution. + dtype: The type of the output. 
+ seed: A Python integer. Used to create a random seed for the distribution. + See + `tf.random.set_seed` + for behavior. + name: A name for the operation (optional). + + Returns: + A tensor of the specified shape filled with random truncated normal values. + """ + with ops.name_scope(name, "parameterized_truncated_normal", + [shape, means, stddevs, minvals, maxvals]) as name: + shape_tensor = shape_util.shape_tensor(shape) + means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means") + stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs") + minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals") + maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals") + seed1, seed2 = random_seed.get_seed(seed) + rnd = gen_random_ops.parameterized_truncated_normal( + shape_tensor, + means_tensor, + stddevs_tensor, + minvals_tensor, + maxvals_tensor, + seed=seed1, + seed2=seed2) + shape_util.maybe_set_static_shape(rnd, shape) + return rnd + + +@tf_export("random.truncated_normal", + v1=["random.truncated_normal", "truncated_normal"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("truncated_normal") +def truncated_normal(shape, + mean=0.0, + stddev=1.0, + dtype=dtypes.float32, + seed=None, + name=None): + """Outputs random values from a truncated normal distribution. + + The values are drawn from a normal distribution with specified mean and + standard deviation, discarding and re-drawing any samples that are more than + two standard deviations from the mean. + + Examples: + + >>> tf.random.truncated_normal(shape=[2]) + + + >>> tf.random.truncated_normal(shape=[2], mean=3, stddev=1, dtype=tf.float32) + + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + mean: A 0-D Tensor or Python value of type `dtype`. The mean of the + truncated normal distribution. + stddev: A 0-D Tensor or Python value of type `dtype`. 
The standard deviation + of the normal distribution, before truncation. + dtype: The type of the output. Restricted to floating-point types: + `tf.half`, `tf.float`, `tf.double`, etc. + seed: A Python integer. Used to create a random seed for the distribution. + See `tf.random.set_seed` for more information. + name: A name for the operation (optional). + + Returns: + A tensor of the specified shape filled with random truncated normal values. + """ + with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name: + shape_tensor = shape_util.shape_tensor(shape) + mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean") + stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") + seed1, seed2 = random_seed.get_seed(seed) + rnd = gen_random_ops.truncated_normal( + shape_tensor, dtype, seed=seed1, seed2=seed2) + mul = rnd * stddev_tensor + value = math_ops.add(mul, mean_tensor, name=name) + shape_util.maybe_set_static_shape(value, shape) + return value + + +ops.NotDifferentiable("ParameterizedTruncatedNormal") +ops.NotDifferentiable("TruncatedNormal") + + +@tf_export("random.uniform", v1=["random.uniform", "random_uniform"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("random_uniform") +def random_uniform(shape, + minval=0, + maxval=None, + dtype=dtypes.float32, + seed=None, + name=None): + """Outputs random values from a uniform distribution. + + The generated values follow a uniform distribution in the range + `[minval, maxval)`. The lower bound `minval` is included in the range, while + the upper bound `maxval` is excluded. + + For floats, the default range is `[0, 1)`. For ints, at least `maxval` must + be specified explicitly. + + In the integer case, the random integers are slightly biased unless + `maxval - minval` is an exact power of two. The bias is small for values of + `maxval - minval` significantly smaller than the range of the output (either + `2**32` or `2**64`). 
+ + Examples: + + >>> tf.random.uniform(shape=[2]) + + >>> tf.random.uniform(shape=[], minval=-1., maxval=0.) + + >>> tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.int64) + + + The `seed` argument produces a deterministic sequence of tensors across + multiple calls. To repeat that sequence, use `tf.random.set_seed`: + + >>> tf.random.set_seed(5) + >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) + + >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) + + >>> tf.random.set_seed(5) + >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) + + >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) + + + Without `tf.random.set_seed` but with a `seed` argument is specified, small + changes to function graphs or previously executed operations will change the + returned value. See `tf.random.set_seed` for details. + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + minval: A Tensor or Python value of type `dtype`, broadcastable with + `shape` (for integer types, broadcasting is not supported, so it needs to + be a scalar). The lower bound on the range of random values to generate + (inclusive). Defaults to 0. + maxval: A Tensor or Python value of type `dtype`, broadcastable with + `shape` (for integer types, broadcasting is not supported, so it needs to + be a scalar). The upper bound on the range of random values to generate + (exclusive). Defaults to 1 if `dtype` is floating point. + dtype: The type of the output: `float16`, `bfloat16`, `float32`, `float64`, + `int32`, or `int64`. Defaults to `float32`. + seed: A Python integer. Used in combination with `tf.random.set_seed` to + create a reproducible sequence of tensors across multiple calls. + name: A name for the operation (optional). + + Returns: + A tensor of the specified shape filled with random uniform values. + + Raises: + ValueError: If `dtype` is integral and `maxval` is not specified. 
+ """ + dtype = dtypes.as_dtype(dtype) + accepted_dtypes = (dtypes.float16, dtypes.bfloat16, dtypes.float32, + dtypes.float64, dtypes.int32, dtypes.int64) + if dtype not in accepted_dtypes: + raise ValueError( + f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are " + f"{accepted_dtypes}.") + if maxval is None: + if dtype.is_integer: + raise ValueError("Must specify maxval for integer dtype %r" % dtype) + maxval = 1 + with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name: + shape = shape_util.shape_tensor(shape) + # In case of [0,1) floating results, minval and maxval is unused. We do an + # `is` comparison here since this is cheaper than isinstance or __eq__. + minval_is_zero = isinstance(minval, int) and minval == 0 + maxval_is_one = isinstance(maxval, int) and maxval == 1 + if not minval_is_zero or not maxval_is_one or dtype.is_integer: + minval = ops.convert_to_tensor(minval, dtype=dtype, name="min") + maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max") + seed1, seed2 = random_seed.get_seed(seed) + if dtype.is_integer: + result = gen_random_ops.random_uniform_int( + shape, minval, maxval, seed=seed1, seed2=seed2, name=name) + else: + result = gen_random_ops.random_uniform( + shape, dtype, seed=seed1, seed2=seed2) + if minval_is_zero: + if not maxval_is_one: + result = math_ops.multiply(result, maxval) + else: + result = math_ops.add(result * (maxval - minval), minval, name=name) + # TODO(b/132092188): C++ shape inference inside functional ops does not + # cross FuncGraph boundaries since that information is only available in + # python. So we manually get the static shape using + # `constant_value_as_shape` which *does* cross function boundaries. 
+ shape_util.maybe_set_static_shape(result, shape) + return result + + +ops.NotDifferentiable("RandomUniform") + + +@tf_export("random.shuffle", v1=["random.shuffle", "random_shuffle"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("random_shuffle") +def random_shuffle(value, seed=None, name=None): + """Randomly shuffles a tensor along its first dimension. + + The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + to one and only one `output[i]`. For example, a mapping that might occur for a + 3x2 tensor is: + + ```python + [[1, 2], [[5, 6], + [3, 4], ==> [1, 2], + [5, 6]] [3, 4]] + ``` + + Args: + value: A Tensor to be shuffled. + seed: A Python integer. Used to create a random seed for the distribution. + See + `tf.random.set_seed` + for behavior. + name: A name for the operation (optional). + + Returns: + A tensor of same shape and type as `value`, shuffled along its first + dimension. + """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_random_ops.random_shuffle( + value, seed=seed1, seed2=seed2, name=name) + + +ops.NotDifferentiable("RandomShuffle") + + +@tf_export(v1=["random.multinomial", "multinomial"]) +@dispatch.add_dispatch_support +@deprecation.deprecated( + date=None, instructions="Use `tf.random.categorical` instead.") +def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None): + """Draws samples from a multinomial distribution. + + Example: + + ```python + # samples has shape [1, 5], where each value is either 0 or 1 with equal + # probability. + samples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5) + ``` + + Args: + logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice + `[i, :]` represents the unnormalized log-probabilities for all classes. + num_samples: 0-D. Number of independent samples to draw for each row slice. + seed: A Python integer. Used to create a random seed for the distribution. + See `tf.random.set_seed` for behavior. 
+ name: Optional name for the operation. + output_dtype: The integer type of the output: `int32` or `int64`. Defaults + to `int64`. + + Returns: + The drawn samples of shape `[batch_size, num_samples]`. + """ + with ops.name_scope(name, "multinomial", [logits]): + return multinomial_categorical_impl(logits, num_samples, output_dtype, seed) + + +@tf_export("random.categorical") +@dispatch.add_dispatch_support +def categorical(logits, num_samples, dtype=None, seed=None, name=None): + """Draws samples from a categorical distribution. + + Example: + + ```python + # samples has shape [1, 5], where each value is either 0 or 1 with equal + # probability. + samples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5) + ``` + + Args: + logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice + `[i, :]` represents the unnormalized log-probabilities for all classes. + num_samples: 0-D. Number of independent samples to draw for each row slice. + dtype: The integer type of the output: `int32` or `int64`. Defaults to + `int64`. + seed: A Python integer. Used to create a random seed for the distribution. + See `tf.random.set_seed` for behavior. + name: Optional name for the operation. + + Returns: + The drawn samples of shape `[batch_size, num_samples]`. + """ + with ops.name_scope(name, "categorical", [logits]): + return multinomial_categorical_impl(logits, num_samples, dtype, seed) + + +def multinomial_categorical_impl(logits, num_samples, dtype, seed): + """Implementation for random.categorical (v1) and random.categorical (v2).""" + logits = ops.convert_to_tensor(logits, name="logits") + dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64 + accepted_dtypes = (dtypes.int32, dtypes.int64) + if dtype not in accepted_dtypes: + raise ValueError( + f"Argument `dtype` got invalid value {dtype}. 
Accepted dtypes are " + f"{accepted_dtypes}.") + seed1, seed2 = random_seed.get_seed(seed) + return gen_random_ops.multinomial( + logits, num_samples, seed=seed1, seed2=seed2, output_dtype=dtype) + + +ops.NotDifferentiable("Multinomial") + + +def _maybe_set_static_shape_helper(tensor, shape, postfix_tensor): + if (not context.executing_eagerly() and + ops.get_default_graph().building_function and + not tensor.shape.is_fully_defined()): + shape = shape_util.shape_tensor(shape) + const_shape = tensor_util.constant_value_as_shape(shape) + postfix_tensor = ops.convert_to_tensor(postfix_tensor) + tensor.set_shape(const_shape.concatenate(postfix_tensor.shape)) + + +@tf_export("random.gamma", v1=["random.gamma", "random_gamma"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("random_gamma") +def random_gamma(shape, + alpha, + beta=None, + dtype=dtypes.float32, + seed=None, + name=None): + """Draws `shape` samples from each of the given Gamma distribution(s). + + `alpha` is the shape parameter describing the distribution(s), and `beta` is + the inverse scale parameter(s). + + Note: Because internal calculations are done using `float64` and casting has + `floor` semantics, we must manually map zero outcomes to the smallest + possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This + means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise + should. This bias can only happen for small values of `alpha`, i.e., + `alpha << 1` or large values of `beta`, i.e., `beta >> 1`. + + The samples are differentiable w.r.t. alpha and beta. + The derivatives are computed using the approach described in + (Figurnov et al., 2018). 
+ + Example: + + ```python + samples = tf.random.gamma([10], [0.5, 1.5]) + # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents + # the samples drawn from each distribution + + samples = tf.random.gamma([7, 5], [0.5, 1.5]) + # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] + # represents the 7x5 samples drawn from each of the two distributions + + alpha = tf.constant([[1.],[3.],[5.]]) + beta = tf.constant([[3., 4.]]) + samples = tf.random.gamma([30], alpha=alpha, beta=beta) + # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions. + + loss = tf.reduce_mean(tf.square(samples)) + dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta]) + # unbiased stochastic derivatives of the loss function + alpha.shape == dloss_dalpha.shape # True + beta.shape == dloss_dbeta.shape # True + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output samples + to be drawn per alpha/beta-parameterized distribution. + alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha` + provides the shape parameter(s) describing the gamma distribution(s) to + sample. Must be broadcastable with `beta`. + beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1. + `beta` provides the inverse scale parameter(s) of the gamma + distribution(s) to sample. Must be broadcastable with `alpha`. + dtype: The type of alpha, beta, and the output: `float16`, `float32`, or + `float64`. + seed: A Python integer. Used to create a random seed for the distributions. + See + `tf.random.set_seed` + for behavior. + name: Optional name for the operation. + + Returns: + samples: a `Tensor` of shape + `tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type + `dtype`. 
+ + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + with ops.name_scope(name, "random_gamma", [shape, alpha, beta]): + shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32) + alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype) + beta = ops.convert_to_tensor( + beta if beta is not None else 1, name="beta", dtype=dtype) + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(alpha), array_ops.shape(beta)) + alpha_broadcast = array_ops.broadcast_to(alpha, broadcast_shape) + seed1, seed2 = random_seed.get_seed(seed) + result = math_ops.maximum( + np.finfo(alpha.dtype.as_numpy_dtype).tiny, + gen_random_ops.random_gamma( + shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta) + _maybe_set_static_shape_helper(result, shape, alpha_broadcast) + return result + + +@tf_export(v1=["random.poisson", "random_poisson"]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints("random_poisson") +def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None): + """Draws `shape` samples from each of the given Poisson distribution(s). + + `lam` is the rate parameter describing the distribution(s). + + Example: + + ```python + samples = tf.random.poisson([0.5, 1.5], [10]) + # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents + # the samples drawn from each distribution + + samples = tf.random.poisson([12.2, 3.3], [7, 5]) + # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] + # represents the 7x5 samples drawn from each of the two distributions + ``` + + Args: + lam: A Tensor or Python value or N-D array of type `dtype`. + `lam` provides the rate parameter(s) describing the poisson + distribution(s) to sample. + shape: A 1-D integer Tensor or Python array. 
The shape of the output samples + to be drawn per "rate"-parameterized distribution. + dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or + `int64`. + seed: A Python integer. Used to create a random seed for the distributions. + See + `tf.random.set_seed` + for behavior. + name: Optional name for the operation. + + Returns: + samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` + with values of type `dtype`. + """ + return random_poisson_v2(shape, lam, dtype, seed, name) + + +@tf_export("random.poisson", v1=[]) +@dispatch.add_dispatch_support +def random_poisson_v2(shape, lam, dtype=dtypes.float32, seed=None, name=None): + """Draws `shape` samples from each of the given Poisson distribution(s). + + `lam` is the rate parameter describing the distribution(s). + + Example: + + ```python + samples = tf.random.poisson([10], [0.5, 1.5]) + # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents + # the samples drawn from each distribution + + samples = tf.random.poisson([7, 5], [12.2, 3.3]) + # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] + # represents the 7x5 samples drawn from each of the two distributions + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output samples + to be drawn per "rate"-parameterized distribution. + lam: A Tensor or Python value or N-D array of type `dtype`. + `lam` provides the rate parameter(s) describing the poisson + distribution(s) to sample. + dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or + `int64`. + seed: A Python integer. Used to create a random seed for the distributions. + See + `tf.random.set_seed` + for behavior. + name: Optional name for the operation. + + Returns: + samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` + with values of type `dtype`. 
+ """ + with ops.name_scope(name, "random_poisson", [lam, shape]): + shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32) + seed1, seed2 = random_seed.get_seed(seed) + result = gen_random_ops.random_poisson_v2( + shape, lam, dtype=dtype, seed=seed1, seed2=seed2) + _maybe_set_static_shape_helper(result, shape, lam) + return result diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ref_variable.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ref_variable.py new file mode 100644 index 0000000000000000000000000000000000000000..241275b44da30f78e70fd92edcce02c24eb54281 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ref_variable.py @@ -0,0 +1,1345 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""RefVariable class.""" + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import variable_pb2 +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion_registry +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_state_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import resource_variables_toggle +from tensorflow.python.ops import state_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.ops import variable_v1 +from tensorflow.python.ops import variables +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.trackable import base as trackable +from tensorflow.python.types import core +from tensorflow.python.util import compat +from tensorflow.python.util.deprecation import deprecated + + +def default_variable_creator(next_creator=None, **kwargs): + """Default variable creator.""" + assert next_creator is None + initial_value = kwargs.get("initial_value", None) + trainable = kwargs.get("trainable", None) + collections = kwargs.get("collections", None) + validate_shape = kwargs.get("validate_shape", True) + caching_device = kwargs.get("caching_device", None) + name = kwargs.get("name", None) + variable_def = kwargs.get("variable_def", None) + dtype = kwargs.get("dtype", None) + expected_shape = kwargs.get("expected_shape", None) + import_scope = kwargs.get("import_scope", None) + constraint = kwargs.get("constraint", None) + use_resource = kwargs.get("use_resource", None) + synchronization = kwargs.get("synchronization", 
None) + aggregation = kwargs.get("aggregation", None) + shape = kwargs.get("shape", None) + + if use_resource is None: + use_resource = variable_scope.get_variable_scope().use_resource + if use_resource is None: + use_resource = resource_variables_toggle.resource_variables_enabled() + use_resource = use_resource or context.executing_eagerly() + if use_resource: + distribute_strategy = kwargs.get("distribute_strategy", None) + return resource_variable_ops.ResourceVariable( + initial_value=initial_value, + trainable=trainable, + collections=collections, + validate_shape=validate_shape, + caching_device=caching_device, + name=name, + dtype=dtype, + constraint=constraint, + variable_def=variable_def, + import_scope=import_scope, + distribute_strategy=distribute_strategy, + synchronization=synchronization, + aggregation=aggregation, + shape=shape) + else: + return RefVariable( + initial_value=initial_value, + trainable=trainable, + collections=collections, + validate_shape=validate_shape, + caching_device=caching_device, + name=name, + dtype=dtype, + constraint=constraint, + variable_def=variable_def, + expected_shape=expected_shape, + import_scope=import_scope, + synchronization=synchronization, + aggregation=aggregation, + shape=shape) + + +def _to_proto_fn(v, export_scope=None): + """Converts Variable and ResourceVariable to VariableDef for collections.""" + return v.to_proto(export_scope=export_scope) + + +def _from_proto_fn(v, import_scope=None): + """Creates Variable or ResourceVariable from VariableDef as needed.""" + if v.is_resource: + return resource_variable_ops.ResourceVariable.from_proto( + v, import_scope=import_scope) + return variable_v1.VariableV1.from_proto(v, import_scope=import_scope) + + +ops.register_proto_function( + ops.GraphKeys.GLOBAL_VARIABLES, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.TRAINABLE_VARIABLES, + proto_type=variable_pb2.VariableDef, + 
to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.MOVING_AVERAGE_VARIABLES, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.LOCAL_VARIABLES, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.MODEL_VARIABLES, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.GLOBAL_STEP, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) +ops.register_proto_function( + ops.GraphKeys.METRIC_VARIABLES, + proto_type=variable_pb2.VariableDef, + to_proto=_to_proto_fn, + from_proto=_from_proto_fn) + + +# TODO(apassos): do not repeat all comments here +class RefVariable(variable_v1.VariableV1, core.Tensor): + """Ref-based implementation of variables.""" + + def __init__( + self, # pylint: disable=super-init-not-called + initial_value=None, + trainable=None, + collections=None, + validate_shape=True, + caching_device=None, + name=None, + variable_def=None, + dtype=None, + expected_shape=None, + import_scope=None, + constraint=None, + synchronization=None, + aggregation=None, + shape=None): + """Creates a new variable with value `initial_value`. + + The new variable is added to the graph collections listed in `collections`, + which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + + If `trainable` is `True` the variable is also added to the graph collection + `GraphKeys.TRAINABLE_VARIABLES`. + + This constructor creates both a `variable` Op and an `assign` Op to set the + variable to its initial value. + + Args: + initial_value: A `Tensor`, or Python object convertible to a `Tensor`, + which is the initial value for the Variable. The initial value must have + a shape specified unless `validate_shape` is set to False. 
Can also be a + callable with no argument that returns the initial value when called. In + that case, `dtype` must be specified. (Note that initializer functions + from init_ops.py must first be bound to a shape before being used here.) + trainable: If `True`, also adds the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default + list of variables to use by the `Optimizer` classes. Defaults to `True`, + unless `synchronization` is set to `ON_READ`, in which case it defaults + to `False`. + collections: List of graph collections keys. The new variable is added to + these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + validate_shape: If `False`, allows the variable to be initialized with a + value of unknown shape. If `True`, the default, the shape of + `initial_value` must be known. + caching_device: Optional device string describing where the Variable + should be cached for reading. Defaults to the Variable's device. If not + `None`, caches on another device. Typical use is to cache on the device + where the Ops using the Variable reside, to deduplicate copying through + `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + variable_def: `VariableDef` protocol buffer. If not `None`, recreates the + Variable object with its contents, referencing the variable's nodes in + the graph, which must already exist. The graph is not changed. + `variable_def` and the other arguments are mutually exclusive. + dtype: If set, initial_value will be converted to the given type. If + `None`, either the datatype will be kept (if `initial_value` is a + Tensor), or `convert_to_tensor` will decide. + expected_shape: A TensorShape. If set, initial_value is expected to have + this shape. + import_scope: Optional `string`. Name scope to add to the `Variable.` Only + used when initializing from protocol buffer. 
+ constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + synchronization: Indicates when a distributed a variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + shape: (optional) The shape of this variable. If None, the shape of + `initial_value` will be used. When setting this argument to + `tf.TensorShape(None)` (representing an unspecified shape), the variable + can be assigned with values of different shapes. + + Raises: + ValueError: If both `variable_def` and initial_value are specified. + ValueError: If the initial value is not specified, or does not have a + shape and `validate_shape` is `True`. + RuntimeError: If eager execution is enabled. + """ + self._in_graph_mode = True + if variable_def: + # If variable_def is provided, recreates the variable from its fields. + if initial_value: + raise ValueError("variable_def and initial_value are mutually " + "exclusive.") + self._init_from_proto(variable_def, import_scope=import_scope) + else: + # Create from initial_value. 
+ self._init_from_args( + initial_value=initial_value, + trainable=trainable, + collections=collections, + validate_shape=validate_shape, + caching_device=caching_device, + name=name, + dtype=dtype, + expected_shape=expected_shape, + constraint=constraint, + synchronization=synchronization, + aggregation=aggregation, + shape=shape) + + def __repr__(self): + if context.executing_eagerly() and not self._in_graph_mode: + return "" % ( + self.name, self.get_shape(), self.dtype.name, + ops.numpy_text(self.read_value(), is_repr=True)) + else: + return "" % ( + self.name, self.get_shape(), self.dtype.name) + + def _init_from_args(self, + initial_value=None, + trainable=None, + collections=None, + validate_shape=True, + caching_device=None, + name=None, + dtype=None, + expected_shape=None, + constraint=None, + synchronization=None, + aggregation=None, + shape=None): + """Creates a new variable from arguments. + + Args: + initial_value: A `Tensor`, or Python object convertible to a `Tensor`, + which is the initial value for the Variable. The initial value must have + a shape specified unless `validate_shape` is set to False. Can also be a + callable with no argument that returns the initial value when called. + (Note that initializer functions from init_ops.py must first be bound to + a shape before being used here.) + trainable: If `True`, also adds the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default + list of variables to use by the `Optimizer` classes. Defaults to `True`, + unless `synchronization` is set to `ON_READ`, in which case it defaults + to `False`. + collections: List of graph collections keys. The new variable is added to + these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + validate_shape: If `False`, allows the variable to be initialized with a + value of unknown shape. If `True`, the default, the shape of + `initial_value` must be known. 
+ caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + dtype: If set, initial_value will be converted to the given type. If None, + either the datatype will be kept (if initial_value is a Tensor) or + float32 will be used (if it is a Python object convertible to a Tensor). + expected_shape: Deprecated. Ignored. + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + synchronization: Indicates when a distributed a variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + shape: (optional) The shape of this variable. If None, the shape of + `initial_value` will be used. When setting this argument to + `tf.TensorShape(None)` (representing an unspecified shape), the variable + can be assigned with values of different shapes. + + Raises: + ValueError: If the initial value is not specified, or does not have a + shape and `validate_shape` is `True`. 
+ RuntimeError: If lifted into the eager context. + """ + _ = expected_shape + if initial_value is None: + raise ValueError("initial_value must be specified.") + init_from_fn = callable(initial_value) + + if collections is None: + collections = [ops.GraphKeys.GLOBAL_VARIABLES] + if not isinstance(collections, (list, tuple, set)): + raise ValueError( + "collections argument to Variable constructor must be a list, tuple, " + "or set. Got %s of type %s" % (collections, type(collections))) + if constraint is not None and not callable(constraint): + raise ValueError("The `constraint` argument must be a callable.") + + # Store the graph key so optimizers know how to only retrieve variables from + # this graph. + self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access + if isinstance(initial_value, trackable.CheckpointInitialValue): + self._maybe_initialize_trackable() + self._update_uid = initial_value.checkpoint_position.restore_uid + initial_value = initial_value.wrapped_value + + synchronization, aggregation, trainable = ( + variables.validate_synchronization_aggregation_trainable( + synchronization, aggregation, trainable, name)) + self._synchronization = synchronization + self._aggregation = aggregation + self._trainable = trainable + if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: + collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] + with ops.init_scope(): + # Ensure that we weren't lifted into the eager context. + if context.executing_eagerly(): + raise RuntimeError( + "Reference variables are not supported when eager execution is " + "enabled. 
Please run `tf.compat.v1.enable_resource_variables()` to " + "switch to resource variables.") + with ops.name_scope(name, "Variable", + [] if init_from_fn else [initial_value]) as name: + + if init_from_fn: + # Use attr_scope and device(None) to simulate the behavior of + # colocate_with when the variable we want to colocate with doesn't + # yet exist. + true_name = ops.name_from_scope_name(name) # pylint: disable=protected-access + attr = attr_value_pb2.AttrValue( + list=attr_value_pb2.AttrValue.ListValue( + s=[compat.as_bytes("loc:@%s" % true_name)])) + # pylint: disable=protected-access + with ops.get_default_graph()._attr_scope({"_class": attr}): + with ops.name_scope("Initializer"), ops.device(None): + initial_value = initial_value() + if isinstance(initial_value, trackable.CheckpointInitialValue): + self._maybe_initialize_trackable() + self._update_uid = initial_value.checkpoint_position.restore_uid + initial_value = initial_value.wrapped_value + self._initial_value = ops.convert_to_tensor( + initial_value, name="initial_value", dtype=dtype) + if shape is None: + shape = ( + self._initial_value.get_shape() + if validate_shape else tensor_shape.unknown_shape()) + self._variable = state_ops.variable_op_v2( + shape, self._initial_value.dtype.base_dtype, name=name) + # pylint: enable=protected-access + + # Or get the initial value from a Tensor or Python object. + else: + self._initial_value = ops.convert_to_tensor( + initial_value, name="initial_value", dtype=dtype) + # pylint: disable=protected-access + if self._initial_value.op._get_control_flow_context() is not None: + raise ValueError( + "Initializer for variable %s is from inside a control-flow " + "construct, such as a loop or conditional. When creating a " + "variable inside a loop or conditional, use a lambda as the " + "initializer." 
% name) + if shape is None: + # pylint: enable=protected-access + shape = ( + self._initial_value.get_shape() + if validate_shape else tensor_shape.unknown_shape()) + # In this case, the variable op can't be created until after the + # initial_value has been converted to a Tensor with a known type. + self._variable = state_ops.variable_op_v2( + shape, self._initial_value.dtype.base_dtype, name=name) + + # Cache the name in `self`, because some APIs call `Variable.name` in a + # tight loop, and this halves the cost. + self._name = self._variable.name + + # Manually overrides the variable's shape with the initial value's. + if validate_shape: + initial_value_shape = self._initial_value.get_shape() + if not initial_value_shape.is_fully_defined(): + raise ValueError("initial_value must have a shape specified: %s" % + self._initial_value) + + # If 'initial_value' makes use of other variables, make sure we don't + # have an issue if these other variables aren't initialized first by + # using their initialized_value() method. + self._initializer_op = state_ops.assign( + self._variable, + variables._try_guard_against_uninitialized_dependencies( # pylint: disable=protected-access + name, self._initial_value), + validate_shape=validate_shape).op + + # TODO(vrv): Change this class to not take caching_device, but + # to take the op to colocate the snapshot with, so we can use + # colocation rather than devices. + if caching_device is not None: + with ops.device(caching_device): + self._snapshot = array_ops.identity(self._variable, name="read") + else: + with ops.colocate_with(self._variable.op): + self._snapshot = array_ops.identity(self._variable, name="read") + ops.add_to_collections(collections, self) + + self._caching_device = caching_device + self._save_slice_info = None + self._constraint = constraint + + def _init_from_proto(self, variable_def, import_scope=None): + """Recreates the Variable object from a `VariableDef` protocol buffer. 
+ + Args: + variable_def: `VariableDef` protocol buffer, describing a variable whose + nodes already exists in the graph. + import_scope: Optional `string`. Name scope to add. + """ + assert isinstance(variable_def, variable_pb2.VariableDef) + # Create from variable_def. + g = ops.get_default_graph() + self._variable = g.as_graph_element( + ops.prepend_name_scope( + variable_def.variable_name, import_scope=import_scope)) + self._name = self._variable.name + self._initializer_op = g.as_graph_element( + ops.prepend_name_scope( + variable_def.initializer_name, import_scope=import_scope)) + # Tests whether initial_value_name exists first for backwards compatibility. + if (hasattr(variable_def, "initial_value_name") and + variable_def.initial_value_name): + self._initial_value = g.as_graph_element( + ops.prepend_name_scope( + variable_def.initial_value_name, import_scope=import_scope)) + else: + self._initial_value = None + synchronization, aggregation, trainable = ( + variables.validate_synchronization_aggregation_trainable( + variable_def.synchronization, variable_def.aggregation, + variable_def.trainable, variable_def.variable_name)) + self._synchronization = synchronization + self._aggregation = aggregation + self._trainable = trainable + self._snapshot = g.as_graph_element( + ops.prepend_name_scope( + variable_def.snapshot_name, import_scope=import_scope)) + if variable_def.HasField("save_slice_info_def"): + self._save_slice_info = variables.Variable.SaveSliceInfo( + save_slice_info_def=variable_def.save_slice_info_def, + import_scope=import_scope) + else: + self._save_slice_info = None + self._caching_device = None + self._constraint = None + + def _as_graph_element(self): + """Conversion function for Graph.as_graph_element().""" + return self._variable + + def value(self): + """Returns the last snapshot of this variable. 
+ + You usually do not need to call this method as all ops that need the value + of the variable call it automatically through a `convert_to_tensor()` call. + + Returns a `Tensor` which holds the value of the variable. You can not + assign a new value to this tensor as it is not a reference to the variable. + + To avoid copies, if the consumer of the returned value is on the same device + as the variable, this actually returns the live value of the variable, not + a copy. Updates to the variable are seen by the consumer. If the consumer + is on a different device it will get a copy of the variable. + + Returns: + A `Tensor` containing the value of the variable. + """ + return self._snapshot + + def read_value(self): + """Returns the value of this variable, read in the current context. + + Can be different from value() if it's on another device, with control + dependencies, etc. + + Returns: + A `Tensor` containing the value of the variable. + """ + return array_ops.identity(self._variable, name="read") + + def _ref(self): + """Returns a reference to this variable. + + You usually do not need to call this method as all ops that need a reference + to the variable call it automatically. + + Returns is a `Tensor` which holds a reference to the variable. You can + assign a new value to the variable by passing the tensor to an assign op. + See `tf.Variable.value` if you want to get the value of the + variable. + + Returns: + A `Tensor` that is a reference to the variable. + """ + return self._variable + + def set_shape(self, shape): + """Overrides the shape for this variable. + + Args: + shape: the `TensorShape` representing the overridden shape. 
+ """ + self._ref().set_shape(shape) + self.value().set_shape(shape) + + @property + def trainable(self): + return self._trainable + + @property + def synchronization(self): + return self._synchronization + + @property + def aggregation(self): + return self._aggregation + + def eval(self, session=None): + """In a session, computes and returns the value of this variable. + + This is not a graph construction method, it does not add ops to the graph. + + This convenience method requires a session where the graph + containing this variable has been launched. If no session is + passed, the default session is used. See `tf.compat.v1.Session` for more + information on launching a graph and on sessions. + + ```python + v = tf.Variable([1, 2]) + init = tf.compat.v1.global_variables_initializer() + + with tf.compat.v1.Session() as sess: + sess.run(init) + # Usage passing the session explicitly. + print(v.eval(sess)) + # Usage with the default session. The 'with' block + # above makes 'sess' the default session. + print(v.eval()) + ``` + + Args: + session: The session to use to evaluate this variable. If none, the + default session is used. + + Returns: + A numpy `ndarray` with a copy of the value of this variable. + """ + return self._variable.eval(session=session) + + @property + def initial_value(self): + """Returns the Tensor used as the initial value for the variable. + + Note that this is different from `initialized_value()` which runs + the op that initializes the variable before returning its value. + This method returns the tensor that is used by the op that initializes + the variable. + + Returns: + A `Tensor`. + """ + return self._initial_value + + @property + def constraint(self): + """Returns the constraint function associated with this variable. + + Returns: + The constraint function that was passed to the variable constructor. + Can be `None` if no constraint was passed. 
+ """ + return self._constraint + + def assign(self, value, use_locking=False, name=None, read_value=True): + """Assigns a new value to the variable. + + This is essentially a shortcut for `assign(self, value)`. + + Args: + value: A `Tensor`. The new value for this variable. + use_locking: If `True`, use locking during the assignment. + name: The name of the operation to be created + read_value: if True, will return something which evaluates to the new + value of the variable; if False will return the assign op. + + Returns: + A `Tensor` that will hold the new value of this variable after + the assignment has completed. + """ + assign = state_ops.assign( + self._variable, value, use_locking=use_locking, name=name) + if read_value: + return assign + return assign.op + + def assign_add(self, delta, use_locking=False, name=None, read_value=True): + """Adds a value to this variable. + + This is essentially a shortcut for `assign_add(self, delta)`. + + Args: + delta: A `Tensor`. The value to add to this variable. + use_locking: If `True`, use locking during the operation. + name: The name of the operation to be created + read_value: if True, will return something which evaluates to the new + value of the variable; if False will return the assign op. + + Returns: + A `Tensor` that will hold the new value of this variable after + the addition has completed. + """ + assign = state_ops.assign_add( + self._variable, delta, use_locking=use_locking, name=name) + if read_value: + return assign + return assign.op + + def assign_sub(self, delta, use_locking=False, name=None, read_value=True): + """Subtracts a value from this variable. + + This is essentially a shortcut for `assign_sub(self, delta)`. + + Args: + delta: A `Tensor`. The value to subtract from this variable. + use_locking: If `True`, use locking during the operation. 
+ name: The name of the operation to be created + read_value: if True, will return something which evaluates to the new + value of the variable; if False will return the assign op. + + Returns: + A `Tensor` that will hold the new value of this variable after + the subtraction has completed. + """ + assign = state_ops.assign_sub( + self._variable, delta, use_locking=use_locking, name=name) + if read_value: + return assign + return assign.op + + def scatter_sub(self, sparse_delta, use_locking=False, name=None): + """Subtracts `tf.IndexedSlices` from this variable. + + Args: + sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered subtraction has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_sub( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_add(self, sparse_delta, use_locking=False, name=None): + """Adds `tf.IndexedSlices` to this variable. + + Args: + sparse_delta: `tf.IndexedSlices` to be added to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered addition has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. 
+ """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_add( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_max(self, sparse_delta, use_locking=False, name=None): + """Updates this variable with the max of `tf.IndexedSlices` and itself. + + Args: + sparse_delta: `tf.IndexedSlices` to use as an argument of max with this + variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered maximization has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_max( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_min(self, sparse_delta, use_locking=False, name=None): + """Updates this variable with the min of `tf.IndexedSlices` and itself. + + Args: + sparse_delta: `tf.IndexedSlices` to use as an argument of min with this + variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered minimization has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. 
+ """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_min( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_mul(self, sparse_delta, use_locking=False, name=None): + """Multiply this variable by `tf.IndexedSlices`. + + Args: + sparse_delta: `tf.IndexedSlices` to multiply this variable by. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered multiplication has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_mul( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_div(self, sparse_delta, use_locking=False, name=None): + """Divide this variable by `tf.IndexedSlices`. + + Args: + sparse_delta: `tf.IndexedSlices` to divide this variable by. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered division has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_div( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_update(self, sparse_delta, use_locking=False, name=None): + """Assigns `tf.IndexedSlices` to this variable. 
+ + Args: + sparse_delta: `tf.IndexedSlices` to be assigned to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered assignment has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) + return gen_state_ops.scatter_update( + self._variable, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): + """Assigns `tf.IndexedSlices` to this variable batch-wise. + + Analogous to `batch_gather`. This assumes that this variable and the + sparse_delta IndexedSlices have a series of leading dimensions that are the + same for all of them, and the updates are performed on the last dimension of + indices. In other words, the dimensions should be the following: + + `num_prefix_dims = sparse_delta.indices.ndims - 1` + `batch_dim = num_prefix_dims + 1` + `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[ + batch_dim:]` + + where + + `sparse_delta.updates.shape[:num_prefix_dims]` + `== sparse_delta.indices.shape[:num_prefix_dims]` + `== var.shape[:num_prefix_dims]` + + And the operation performed can be expressed as: + + `var[i_1, ..., i_n, + sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[ + i_1, ..., i_n, j]` + + When sparse_delta.indices is a 1D tensor, this operation is equivalent to + `scatter_update`. + + To avoid this operation one can looping over the first `ndims` of the + variable and using `scatter_update` on the subtensors that result of slicing + the first dimension. This is a valid option for `ndims = 1`, but less + efficient than this implementation. 
+ + Args: + sparse_delta: `tf.IndexedSlices` to be assigned to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered assignment has completed. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + return state_ops.batch_scatter_update( + self, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name) + + def scatter_nd_sub(self, indices, updates, name=None): + """Applies sparse subtraction to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + op = ref.scatter_nd_sub(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(op) + ``` + + The resulting update to ref would look like this: + + [1, -9, 3, -6, -6, 6, 7, -4] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered subtraction has completed. 
+ """ + return gen_state_ops.scatter_nd_sub( + self._variable, indices, updates, use_locking=True, name=name) + + def scatter_nd_add(self, indices, updates, name=None): + """Applies sparse addition to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + add = ref.scatter_nd_add(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(add) + ``` + + The resulting update to ref would look like this: + + [1, 13, 3, 14, 14, 6, 7, 20] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered addition has completed. + """ + return gen_state_ops.scatter_nd_add( + self._variable, indices, updates, use_locking=True, name=name) + + def scatter_nd_update(self, indices, updates, name=None): + """Applies sparse assignment to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. 
+ It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + op = ref.scatter_nd_update(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(op) + ``` + + The resulting update to ref would look like this: + + [1, 11, 3, 10, 9, 6, 7, 12] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered assignment has completed. + """ + return gen_state_ops.scatter_nd_update( + self._variable, indices, updates, use_locking=True, name=name) + + def scatter_nd_max(self, indices, updates, name=None): + """Updates this variable with the max of `tf.IndexedSlices` and itself. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. 
+ ``` + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered addition has completed. + """ + return gen_state_ops.scatter_nd_max( + self._variable, indices, updates, use_locking=True, name=name) + + def scatter_nd_min(self, indices, updates, name=None): + """Updates this variable with the min of `tf.IndexedSlices` and itself. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + A `Tensor` that will hold the new value of this variable after + the scattered addition has completed. 
+ """ + return gen_state_ops.scatter_nd_min( + self._variable, indices, updates, use_locking=True, name=name) + + def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask, + end_mask, ellipsis_mask, new_axis_mask, + shrink_axis_mask): + return gen_array_ops.strided_slice_assign( + ref=self._ref(), + begin=begin, + end=end, + strides=strides, + value=value, + name=name, + begin_mask=begin_mask, + end_mask=end_mask, + ellipsis_mask=ellipsis_mask, + new_axis_mask=new_axis_mask, + shrink_axis_mask=shrink_axis_mask) + + @deprecated(None, "Prefer Dataset.range instead.") + def count_up_to(self, limit): + """Increments this variable until it reaches `limit`. + + When that Op is run it tries to increment the variable by `1`. If + incrementing the variable would bring it above `limit` then the Op raises + the exception `OutOfRangeError`. + + If no error is raised, the Op outputs the value of the variable before + the increment. + + This is essentially a shortcut for `count_up_to(self, limit)`. + + Args: + limit: value at which incrementing the variable raises an error. + + Returns: + A `Tensor` that will hold the variable value before the increment. If no + other Op modifies this variable, the values produced will all be + distinct. + """ + return state_ops.count_up_to(self._variable, limit=limit) + + # Conversion to tensor. 
+ @staticmethod + def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name + """Utility function for converting a Variable to a Tensor.""" + _ = name + if dtype and not dtype.is_compatible_with(v.dtype): + raise ValueError( + "Incompatible type conversion requested to type '%s' for variable " + "of type '%s'" % (dtype.name, v.dtype.name)) + if as_ref: + return v._ref() # pylint: disable=protected-access + else: + return v.value() + + # NOTE(mrry): This enables the Variable's overloaded "right" binary + # operators to run when the left operand is an ndarray, because it + # accords the Variable class higher priority than an ndarray, or a + # numpy matrix. + # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ + # mechanism, which allows more control over how Variables interact + # with ndarrays. + __array_priority__ = 100 + + @property + def name(self): + """The name of this variable.""" + return self._name + + @property + def initializer(self) -> ops.Operation: + """The initializer operation for this variable.""" + return self._initializer_op + + @property + def device(self): + """The device of this variable.""" + return self._variable.device + + @property + def dtype(self) -> dtypes.DType: + """The `DType` of this variable.""" + return self._variable.dtype + + @property + def op(self) -> ops.Operation: + """The `Operation` of this variable.""" + return self._variable.op + + @property + def graph(self) -> ops.Graph: + """The `Graph` of this variable.""" + return self._variable.graph + + @property + def _distribute_strategy(self): + """The `tf.distribute.Strategy` that this variable was created under.""" + return None # Ref variables are never created inside a strategy. + + @property + def shape(self): + """The `TensorShape` of this variable. + + Returns: + A `TensorShape`. 
+ """ + return self._variable.get_shape() + + def to_proto(self, export_scope=None): + """Converts a `Variable` to a `VariableDef` protocol buffer. + + Args: + export_scope: Optional `string`. Name scope to remove. + + Returns: + A `VariableDef` protocol buffer, or `None` if the `Variable` is not + in the specified name scope. + """ + if (export_scope is None or self._variable.name.startswith(export_scope)): + var_def = variable_pb2.VariableDef() + var_def.variable_name = ops.strip_name_scope(self._variable.name, + export_scope) + if self._initial_value is not None: + # For backwards compatibility. + var_def.initial_value_name = ops.strip_name_scope( + self._initial_value.name, export_scope) + var_def.trainable = self.trainable + var_def.synchronization = self.synchronization.value + var_def.aggregation = self.aggregation.value + var_def.initializer_name = ops.strip_name_scope(self.initializer.name, + export_scope) + var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, + export_scope) + if self._save_slice_info: + var_def.save_slice_info_def.MergeFrom( + self._save_slice_info.to_proto(export_scope=export_scope)) + return var_def + else: + return None + + def __iadd__(self, other): + logging.log_first_n( + logging.WARN, "Variable += will be deprecated. Use variable.assign_add" + " if you want assignment to the variable value or 'x = x + y'" + " if you want a new python Tensor object.", 1) + return self + other + + def __isub__(self, other): + logging.log_first_n( + logging.WARN, "Variable -= will be deprecated. Use variable.assign_sub" + " if you want assignment to the variable value or 'x = x - y'" + " if you want a new python Tensor object.", 1) + return self - other + + def __imul__(self, other): + logging.log_first_n( + logging.WARN, + "Variable *= will be deprecated. 
Use `var.assign(var * other)`" + " if you want assignment to the variable value or `x = x * y`" + " if you want a new python Tensor object.", 1) + return self * other + + def __idiv__(self, other): + logging.log_first_n( + logging.WARN, + "Variable /= will be deprecated. Use `var.assign(var / other)`" + " if you want assignment to the variable value or `x = x / y`" + " if you want a new python Tensor object.", 1) + return self / other + + def __itruediv__(self, other): + logging.log_first_n( + logging.WARN, + "Variable /= will be deprecated. Use `var.assign(var / other)`" + " if you want assignment to the variable value or `x = x / y`" + " if you want a new python Tensor object.", 1) + return self / other + + def __irealdiv__(self, other): + logging.log_first_n( + logging.WARN, + "Variable /= will be deprecated. Use `var.assign(var / other)`" + " if you want assignment to the variable value or `x = x / y`" + " if you want a new python Tensor object.", 1) + return self / other + + def __ipow__(self, other): + logging.log_first_n( + logging.WARN, + "Variable **= will be deprecated. Use `var.assign(var ** other)`" + " if you want assignment to the variable value or `x = x ** y`" + " if you want a new python Tensor object.", 1) + return self**other + + def _serialize_to_tensors(self): + """Implements Trackable._serialize_to_tensors.""" + return {trackable.VARIABLE_VALUE_KEY: self} + + def _restore_from_tensors(self, restored_tensors): + """Implements Trackable._restore_from_tensors.""" + restored_tensor = restored_tensors[trackable.VARIABLE_VALUE_KEY] + return state_ops.assign( + self, + restored_tensor, + validate_shape=self.get_shape().is_fully_defined()) + + +# Register a conversion function which reads the value of the variable, +# allowing instances of the class to be used as tensors. 
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
tensor_conversion_registry.register_tensor_conversion_function(
    RefVariable, RefVariable._TensorConversionFunction)  # pylint: disable=protected-access
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================


"""Utilities for using generic resources."""
# pylint: disable=g-bad-name
import collections
import os

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use


# A generic resource is described by its handle plus the op that creates it
# and the op that reports whether it has been initialized.
_Resource = collections.namedtuple("_Resource",
                                   ["handle", "create", "is_initialized"])


def register_resource(handle, create_op, is_initialized_op, is_shared=True):
  """Registers a resource into the appropriate collections.

  This makes the resource findable in either the shared or local resources
  collection.

  Args:
    handle: op which returns a handle for the resource.
    create_op: op which initializes the resource.
    is_initialized_op: op which returns a scalar boolean tensor of whether
      the resource has been initialized.
    is_shared: if True, the resource gets added to the shared resource
      collection; otherwise it gets added to the local resource collection.
  """
  resource = _Resource(handle, create_op, is_initialized_op)
  collection_key = (
      ops.GraphKeys.RESOURCES if is_shared else ops.GraphKeys.LOCAL_RESOURCES)
  ops.add_to_collection(collection_key, resource)


def shared_resources():
  """Returns resources visible to all tasks in the cluster."""
  return ops.get_collection(ops.GraphKeys.RESOURCES)


def local_resources():
  """Returns resources intended to be local to this session."""
  return ops.get_collection(ops.GraphKeys.LOCAL_RESOURCES)


def report_uninitialized_resources(resource_list=None,
                                   name="report_uninitialized_resources"):
  """Returns the names of all uninitialized resources in resource_list.

  If the returned tensor is empty then all resources have been initialized.

  Args:
    resource_list: resources to check. If None, will use shared_resources() +
      local_resources().
    name: name for the resource-checking op.

  Returns:
    Tensor containing names of the handles of all resources which have not
    yet been initialized.
  """
  if resource_list is None:
    resource_list = shared_resources() + local_resources()
  with ops.name_scope(name):
    # Run all checking operations on CPU (overridable via the environment).
    local_device = os.environ.get(
        "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
    with ops.device(local_device):
      if not resource_list:
        # An empty string tensor means "everything is initialized": callers
        # only need to check that the returned tensor has size 0.
        return array_ops.constant([], dtype=dtypes.string)
      # 1-D boolean tensor: True for every resource NOT yet initialized.
      uninitialized_mask = math_ops.logical_not(
          array_ops_stack.stack([r.is_initialized for r in resource_list]))
      # 1-D string tensor with every resource's handle name.
      handle_names = array_ops.constant(
          [r.handle.name for r in resource_list])
      # Keep only the names of the uninitialized resources.
      return array_ops.boolean_mask(handle_names, uninitialized_mask)


@tf_should_use.should_use_result
def initialize_resources(resource_list, name="init"):
  """Initializes the resources in the given list.

  Args:
    resource_list: list of resources to initialize.
    name: name of the initialization op.

  Returns:
    op responsible for initializing all resources.
  """
  if not resource_list:
    return control_flow_ops.no_op(name=name)
  return control_flow_ops.group(*[r.create for r in resource_list], name=name)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.

This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.layers.legacy_rnn import rnn_cell_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest

# Remove caller that rely on private symbol in future.
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"

# Re-exported aliases: the implementations live in the legacy Keras RNN code.
BasicLSTMCell = rnn_cell_impl.BasicLSTMCell
BasicRNNCell = rnn_cell_impl.BasicRNNCell
DeviceWrapper = rnn_cell_impl.DeviceWrapper
DropoutWrapper = rnn_cell_impl.DropoutWrapper
GRUCell = rnn_cell_impl.GRUCell
LayerRNNCell = rnn_cell_impl.LayerRNNCell
LSTMCell = rnn_cell_impl.LSTMCell
LSTMStateTuple = rnn_cell_impl.LSTMStateTuple
MultiRNNCell = rnn_cell_impl.MultiRNNCell
ResidualWrapper = rnn_cell_impl.ResidualWrapper
RNNCell = rnn_cell_impl.RNNCell


def _zero_state_tensors(state_size, batch_size, dtype):
  """Create tensors of zeros based on state_size, batch_size, and dtype."""

  def make_zeros(per_state_shape):
    """Combine batch_size with one state-size entry to build a zero tensor."""
    zeros = array_ops.zeros(_concat(batch_size, per_state_shape), dtype=dtype)
    if not context.executing_eagerly():
      # Graph mode: propagate whatever static shape information is available.
      zeros.set_shape(_concat(batch_size, per_state_shape, static=True))
    return zeros

  return nest.map_structure(make_zeros, state_size)


def _as_dynamic_and_static(value, label):
  """Normalize a shape prefix/suffix into (dynamic tensor, static list).

  Args:
    value: an int, TensorShape, or Tensor describing part of a shape.
    label: either "prefix" or "suffix"; used only in error messages.

  Returns:
    A pair `(dynamic, static)` where `dynamic` is a rank-1 int32 Tensor (or
    None when the shape is not fully defined) and `static` is a python list
    of dimensions (or None when unknown).

  Raises:
    ValueError: if `value` is a Tensor that is neither a scalar nor a vector.
  """
  if isinstance(value, tensor.Tensor):
    dynamic = value
    static = tensor_util.constant_value(value)
    if dynamic.shape.ndims == 0:
      # Promote a scalar to a length-1 vector so it can be concatenated.
      dynamic = array_ops.expand_dims(dynamic, 0)
    elif dynamic.shape.ndims != 1:
      raise ValueError(
          "%s tensor must be either a scalar or vector, but saw tensor: %s"
          % (label, dynamic)
      )
  else:
    shape = tensor_shape.TensorShape(value)
    static = shape.as_list() if shape.ndims is not None else None
    dynamic = (
        constant_op.constant(shape.as_list(), dtype=dtypes.int32)
        if shape.is_fully_defined()
        else None
    )
  return dynamic, static


def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  p, p_static = _as_dynamic_and_static(prefix, "prefix")
  s, s_static = _as_dynamic_and_static(suffix, "suffix")

  if static:
    shape = tensor_shape.TensorShape(p_static).concatenate(s_static)
    return shape.as_list() if shape.ndims is not None else None
  if p is None or s is None:
    raise ValueError(
        "Provided a prefix or suffix of None: %s and %s" % (prefix, suffix)
    )
  return array_ops.concat((p, s), 0)


def _hasattr(obj, attr_name):
  """True iff `getattr(obj, attr_name)` succeeds (AttributeError only)."""
  try:
    getattr(obj, attr_name)
    return True
  except AttributeError:
    return False


def assert_like_rnncell(cell_name, cell):
  """Raises a TypeError if cell is not like an RNNCell.

  NOTE: Do not rely on the error message (in particular in tests) which can be
  subject to change to increase readability. Use
  ASSERT_LIKE_RNNCELL_ERROR_REGEXP.

  Args:
    cell_name: A string to give a meaningful error referencing to the name of
      the functionargument.
    cell: The object which should behave like an RNNCell.

  Raises:
    TypeError: A human-friendly exception.
  """
  # Pair each structural requirement with the message emitted when it fails.
  checks = [
      (_hasattr(cell, "output_size"), "'output_size' property is missing"),
      (_hasattr(cell, "state_size"), "'state_size' property is missing"),
      (_hasattr(cell, "get_initial_state") or _hasattr(cell, "zero_state"),
       "either 'zero_state' or 'get_initial_state' method is required"),
      (callable(cell), "is not callable"),
  ]
  failures = [message for ok, message in checks if not ok]
  if failures:
    raise TypeError(
        "The argument {!r} ({}) is not an RNNCell: {}.".format(
            cell_name, cell, ", ".join(failures)
        )
    )
# ==============================================================================
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for (block) GRU/LSTM operators."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_rnn_ops


def _block_lstm_grad(op, *grads):
  """Gradient for the BlockLSTM op.

  Args:
    op: the forward BlockLSTM op.
    *grads: gradients flowing into the forward op's seven outputs; only the
      gradients w.r.t. `cs` (index 1) and `h` (index 6) are consumed by the
      backward kernel.

  Returns:
    A 9-tuple of gradients aligned with the forward op's inputs. The leading
    entry is None because `seq_len_max` is not differentiable.
  """
  seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
  i, cs, f, o, ci, co, h = op.outputs
  cs_grad, h_grad = grads[1], grads[6]
  backward = gen_rnn_ops.block_lstm_grad(
      seq_len_max=seq_len_max,
      x=x,
      cs_prev=cs_prev,
      h_prev=h_prev,
      w=w,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=b,
      i=i,
      cs=cs,
      f=f,
      o=o,
      ci=ci,
      co=co,
      h=h,
      cs_grad=cs_grad,
      h_grad=h_grad,
      use_peephole=op.get_attr("use_peephole"))
  # `backward` yields (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad,
  # wcf_grad, wco_grad, b_grad) in forward-input order.
  return (None,) + tuple(backward)


# BlockLSTM and BlockLSTMV2 share the same gradient function.
ops.RegisterGradient("BlockLSTM")(_block_lstm_grad)
ops.RegisterGradient("BlockLSTMV2")(_block_lstm_grad)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tensor Handle Operations."""

# pylint: disable=g-bad-name
import numpy as np

from tensorflow.core.framework import resource_handle_pb2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor as tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export


def encode_resource_handle(resource_handle):
  """Encode a ResourceHandle proto as custom numpy struct type."""
  serialized = resource_handle.SerializeToString()
  return np.asarray(bytearray(serialized), dtype=dtypes.np_resource)


class TensorHandle:
  """Represents a handle for a live tensor in a session."""

  def __init__(self, handle, dtype, session):
    """Constructs a new tensor handle.

    A tensor handle for a persistent tensor is a python string
    that has the form of "tensor_name;unique_id;device_name".

    Args:
      handle: A tensor handle.
      dtype: The data type of the tensor represented by `handle`.
      session: The session in which the tensor is produced.
    """
    self._handle = compat.as_str_any(handle)
    self._resource_handle = None  # Lazily-built ResourceHandleProto.
    self._dtype = dtype
    self._session = session
    # While True, the owning session garbage-collects the persistent tensor
    # when this python object dies.
    self._auto_gc_enabled = True

  def __del__(self):
    if self._auto_gc_enabled:
      self._session._register_dead_handle(self.handle)

  def __str__(self):
    return self._handle

  def _get_resource_handle(self):
    """The ResourceHandle representation of this handle."""
    if self._resource_handle is None:
      proto = resource_handle_pb2.ResourceHandleProto()
      # The device name is the last ";"-separated component of the handle.
      proto.device = self._handle.split(";")[-1]
      proto.container = pywrap_tf_session.TENSOR_HANDLE_KEY
      proto.name = self._handle
      self._resource_handle = proto
    return self._resource_handle

  def to_numpy_array(self):
    """Convert a TensorHandle object to a feedable numpy value.

    Returns:
      A numpy array of a custom struct type that can be used as a feed value
      to run().
    """
    return encode_resource_handle(self._get_resource_handle())

  @property
  def handle(self):
    """The string representation of this handle."""
    return self._handle

  def eval(self):
    """Return the value of the tensor represented by this handle."""
    if not self._auto_gc_enabled:
      raise TypeError("Persistent tensor %s may have already been deleted."
                      % self.handle)
    holder, reader = _get_handle_reader(self._session.graph, self._handle,
                                        self._dtype)
    return self._session.run(reader, feed_dict={holder: self._handle})

  def delete(self):
    """Force the deletion of this persistent tensor."""
    if not self._auto_gc_enabled:
      raise TypeError("Persistent tensor %s may have already been deleted."
                      % self.handle)
    # Deleting explicitly; disable the destructor-time GC path first.
    self._auto_gc_enabled = False
    holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle)
    self._session.run(deleter, feed_dict={holder: self.handle})

  def get_raw_handle(self):
    """Return the raw handle of the tensor.

    Note that the method disables the automatic garbage collection of this
    persistent tensor. The caller is now responsible for managing the life
    time of the tensor.
    """
    self._auto_gc_enabled = False
    return self._handle

  @staticmethod
  def _get_device_name(handle):
    """The device name encoded in the handle."""
    handle_str = compat.as_str_any(handle)
    return pydev.canonical_name(handle_str.split(";")[-1])

  @staticmethod
  def _get_reader_key(handle):
    """The graph key for reader."""
    # Readers are shared per (tensor_name, device_name); the unique id in the
    # middle of the handle is dropped.
    pieces = str(handle).split(";")
    return pieces[0] + ";" + pieces[-1]

  @staticmethod
  def _get_mover_key(feeder, handle):
    """The graph key for mover."""
    return feeder.op.name + ";" + TensorHandle._get_reader_key(handle)


@tf_export(v1=["get_session_handle"])
def get_session_handle(data, name=None):
  """Return the handle of `data`.

  This is EXPERIMENTAL and subject to change.

  Keep `data` "in-place" in the runtime and create a handle that can be
  used to retrieve `data` in a subsequent run().

  Combined with `get_session_tensor`, we can keep a tensor produced in
  one run call in place, and use it as the input in a future run call.

  Args:
    data: A tensor to be stored in the session.
    name: Optional name prefix for the return tensor.

  Returns:
    A scalar string tensor representing a unique handle for `data`.

  Raises:
    TypeError: if `data` is not a Tensor.

  Example:

  ```python
  c = tf.multiply(a, b)
  h = tf.compat.v1.get_session_handle(c)
  h = sess.run(h)

  p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32)
  b = tf.multiply(a, 10)
  c = sess.run(b, feed_dict={p: h.handle})
  ```
  """
  if not isinstance(data, tensor_lib.Tensor):
    raise TypeError("`data` must be of type Tensor.")

  # Colocate this operation with data.
  with ops.colocate_with(data):
    return gen_data_flow_ops.get_session_handle(data, name=name)


@tf_export(v1=["get_session_tensor"])
def get_session_tensor(handle, dtype, name=None):
  """Get the tensor of type `dtype` by feeding a tensor handle.

  This is EXPERIMENTAL and subject to change.

  Get the value of the tensor from a tensor handle. The tensor
  is produced in a previous run() and stored in the state of the
  session.

  Args:
    handle: The string representation of a persistent tensor handle.
    dtype: The type of the output tensor.
    name: Optional name prefix for the return tensor.

  Returns:
    A pair of tensors. The first is a placeholder for feeding a
    tensor handle and the second is the tensor in the session state
    keyed by the tensor handle.
  """
  handle_device = TensorHandle._get_device_name(handle)
  # Place the reader on the same device that produced the persistent tensor.
  with ops.device(handle_device):
    holder = array_ops.placeholder(dtypes.string)
    _register_handle_feeder(holder.graph, holder, dtype)
    value = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)
  return (holder, value)


@tf_export(v1=["delete_session_tensor"])
def delete_session_tensor(handle, name=None):
  """Delete the tensor for the given tensor handle.

  This is EXPERIMENTAL and subject to change.

  Delete the tensor of a given tensor handle. The tensor is produced
  in a previous run() and stored in the state of the session.

  Args:
    handle: The string representation of a persistent tensor handle.
    name: Optional name prefix for the return tensor.

  Returns:
    A pair of graph elements. The first is a placeholder for feeding a
    tensor handle and the second is a deletion operation.
  """
  handle_device = TensorHandle._get_device_name(handle)
  with ops.device(handle_device):
    holder = array_ops.placeholder(dtypes.string)
    deleter = gen_data_flow_ops.delete_session_tensor(holder, name=name)
  return (holder, deleter)


def _register_handle_feeder(graph, feeder, dtype):
  """Remember the dtype a handle-feeding placeholder expects."""
  graph._handle_feeders[feeder.op.name] = dtype


def _get_handle_feeder(graph, feeder):
  """Dtype registered for `feeder`, or None if it is not a handle feeder."""
  return graph._handle_feeders.get(feeder.op.name)


def _get_handle_reader(graph, handle, dtype):
  """Return a read subgraph for this handle."""
  graph_key = TensorHandle._get_reader_key(handle)
  cached = graph._handle_readers.get(graph_key)
  if cached is not None:
    return cached
  # First read of this (name, device) pair: build the reader subgraph.
  handle_device = TensorHandle._get_device_name(handle)
  with graph.as_default(), graph.device(handle_device):
    holder = array_ops.placeholder(dtypes.string)
    _register_handle_feeder(holder.graph, holder, dtype)
    reader = gen_data_flow_ops.get_session_tensor(holder, dtype)
  cached = (holder, reader)
  graph._handle_readers[graph_key] = cached
  return cached


def _get_handle_mover(graph, feeder, handle):
  """Return a move subgraph for this pair of feeder and handle."""
  dtype = _get_handle_feeder(graph, feeder)
  if dtype is None:
    return None
  handle_device = TensorHandle._get_device_name(handle)
  if feeder.op.device == handle_device:
    # Already on the right device; no move needed.
    return None
  graph_key = TensorHandle._get_mover_key(feeder, handle)
  cached = graph._handle_movers.get(graph_key)
  if cached is None:
    # Read on the source device, then re-handle on the feeder's device.
    holder, reader = _get_handle_reader(graph, handle, dtype)
    with graph.as_default(), graph.device(feeder.op.device):
      mover = gen_data_flow_ops.get_session_handle(reader)
    cached = (holder, mover)
    graph._handle_movers[graph_key] = cached
  return cached


def _get_handle_deleter(graph, deleter_key, handle):
  """Return a deletion subgraph for this handle."""
  # NOTE(review): the cache is keyed only by `deleter_key`, not by the
  # handle's device, so the first handle's device is reused for later
  # handles — presumably intentional; confirm against callers.
  cached = graph._handle_deleters.get(deleter_key)
  if cached is None:
    handle_device = TensorHandle._get_device_name(handle)
    with graph.as_default(), graph.device(handle_device):
      holder = array_ops.placeholder(dtypes.string)
      deleter = gen_data_flow_ops.delete_session_tensor(holder)
    cached = (holder, deleter)
    graph._handle_deleters[deleter_key] = cached
  return cached
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import gen_random_index_shuffle_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops_util
from tensorflow.python.ops import shape_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


# Stateless RNG ops have no meaningful gradient.
for _op_name in (
    "StatelessMultinomial",
    "StatelessRandomBinomial",
    "StatelessRandomNormal",
    "StatelessRandomPoisson",
    "StatelessRandomUniform",
    "StatelessRandomUniformInt",
    "StatelessRandomUniformFullInt",
    "StatelessTruncatedNormal",
    "StatelessRandomNormalV2",
    "StatelessRandomUniformV2",
    "StatelessRandomUniformIntV2",
    "StatelessRandomUniformFullIntV2",
    "StatelessTruncatedNormalV2",
    "StatelessRandomShuffle",
    "RandomIndexShuffle",
):
  ops.NotDifferentiable(_op_name)


@tf_export("random.split", "random.experimental.stateless_split")
@dispatch.add_dispatch_support
def split(seed, num=2, alg="auto_select"):
  """Splits an RNG seed into `num` new seeds by adding a leading axis.

  Args:
    seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`).
      (When using XLA, only `int32` is allowed.)
    num: optional, a positive integer or scalar tensor indicating the number of
      seeds to produce (default 2).
    alg: The RNG algorithm used to generate the random numbers. See
      `tf.random.stateless_uniform` for a detailed explanation.

  Returns:
    A tensor with shape [num, 2] representing `num` new seeds. It will have the
    same dtype as `seed` (if `seed` doesn't have an explict dtype, the dtype
    will be determined by `tf.convert_to_tensor`).
  """
  seed = ops.convert_to_tensor(seed)
  # Each derived seed is a pair of full-range stateless-uniform integers
  # keyed by the original seed.
  return stateless_random_uniform(
      shape=[num, 2],
      seed=seed,
      dtype=seed.dtype,
      minval=None,
      maxval=None,
      alg=alg,
  )


@tf_export("random.fold_in", "random.experimental.stateless_fold_in")
@dispatch.add_dispatch_support
def fold_in(seed, data, alg="auto_select"):
  """Folds in data to an RNG seed to form a new RNG seed.

  Typical use: fold a replica ID into a master seed so each replica draws a
  distinct, reproducible random stream.

  Args:
    seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`).
      (When using XLA, only `int32` is allowed.)
    data: an `int32` or `int64` scalar representing data to be folded in to the
      seed.
    alg: The RNG algorithm used to generate the random numbers. See
      `tf.random.stateless_uniform` for a detailed explanation.

  Returns:
    A new RNG seed that is a deterministic function of the inputs and is
    statistically safe for producing a stream of new pseudo-random values. It
    will have the same dtype as `data` (if `data` doesn't have an explict
    dtype, the dtype will be determined by `tf.convert_to_tensor`).
  """
  data = ops.convert_to_tensor(data)
  # First half is derived pseudo-randomly from the seed; second half is the
  # folded-in data itself.
  derived = stateless_random_uniform(
      shape=[], seed=seed, dtype=data.dtype, minval=None, maxval=None, alg=alg
  )
  return array_ops_stack.stack([derived, data])


@tf_export("random.experimental.index_shuffle")
@dispatch.add_dispatch_support
def index_shuffle(index, seed, max_index):
  """Outputs the position of `index` in a permutation of `[0, ..., max_index]`.

  For each possible `seed` and `max_index` there is one pseudorandom
  permutation of the sequence `S=[0, ..., max_index]`. Instead of
  materializing the full array, this computes the new position of any integer
  `i` (`0 <= i <= max_index`) in `S` directly, which is useful for very large
  `max_index` (e.g. streaming shuffles in `tf.data`). All arguments may carry
  a leading batch dimension; scalars are broadcast.

  The operation is stateless: the output is fully determined by `seed`, so
  reuse the same `seed` for all lookups belonging to one shuffle.

  Args:
    index: An integer scalar tensor or vector with values in `[0, max_index]`.
      It can be seen as either a value `v` in the sequence `S` to be
      permutated, or as an index of an element `e` in a shuffled vector.
    seed: A tensor of shape [2] or [n, 2] with dtype `int32`, `uint32`,
      `int64` or `uint64`. The RNG seed. If the rank is unknown during
      graph-building time it must be 1 at runtime.
    max_index: A non-negative tensor with the same shape and dtype as `index`.
      The upper bound (inclusive).

  Returns:
    A tensor with the same dtype as `index`: the new position of `v` in `S`
    (equivalently, the pre-shuffle index of `e`). Vector inputs (shape [n, 2]
    for `seed`) yield a vector output, each element shuffled independently.
  """
  # Users pass a seed with shape [2] for consistency with other stateless_*
  # ops, but the raw op expects shape [3]: pad the leading dimension with an
  # arbitrary constant.
  seed = ops.convert_to_tensor(seed)
  seed_rank = seed.shape.rank
  if seed_rank is None:
    paddings = [[1, 0]]
  else:
    paddings = [[1, 0]] + [[0, 0]] * (seed_rank - 1)
  padded_seed = array_ops.pad(seed, paddings, constant_values=498247692)
  return gen_random_index_shuffle_ops.random_index_shuffle(
      index, seed=padded_seed, max_index=max_index, rounds=4
  )


@tf_export("random.experimental.stateless_shuffle")
@dispatch.add_dispatch_support
def stateless_shuffle(value, seed, alg="auto_select", name=None):
  """Randomly and deterministically shuffles a tensor along its first dimension.

  The tensor is shuffled along dimension 0, such that each `value[j]` is
  mapped to one and only one `output[i]`. This is a stateless version of
  `tf.random.shuffle`: if run twice with the same `value` and `seed`, it will
  produce the same result. The output is consistent across multiple runs on
  the same hardware (and between CPU and GPU), but may change between versions
  of TensorFlow or on non-CPU/GPU hardware.

  Args:
    value: A Tensor to be shuffled.
    seed: A shape [2] Tensor. The seed to the random number generator. Must
      have dtype `int32` or `int64`.
    alg: The RNG algorithm used to generate the random numbers. See
      `tf.random.stateless_uniform` for a detailed explanation.
    name: A name for the operation.

  Returns:
    A tensor of same shape and type as `value`, shuffled along its first
    dimension.
  """
  with ops.name_scope(name, "stateless_shuffle", [value, seed]) as name:
    key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg)
    return gen_stateless_random_ops_v2.stateless_shuffle(
        value, key=key, counter=counter, alg=alg
    )
+ + The generated values follow a uniform distribution in the range + `[minval, maxval)`. The lower bound `minval` is included in the range, while + the upper bound `maxval` is excluded. + + For floats, the default range is `[0, 1)`. For ints, at least `maxval` must + be specified explicitly. + + In the integer case, the random integers are slightly biased unless + `maxval - minval` is an exact power of two. The bias is small for values of + `maxval - minval` significantly smaller than the range of the output (either + `2**32` or `2**64`). + + For full-range (i.e. inclusive of both max and min) random integers, pass + `minval=None` and `maxval=None` with an integer `dtype`. For an integer dtype + either both `minval` and `maxval` must be `None` or neither may be `None`. For + example: + ```python + ints = tf.random.stateless_uniform( + [10], seed=(2, 3), minval=None, maxval=None, dtype=tf.int32) + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + minval: A Tensor or Python value of type `dtype`, broadcastable with `shape` + (for integer types, broadcasting is not supported, so it needs to be a + scalar). The lower bound on the range of random values to generate. Pass + `None` for full-range integers. Defaults to 0. + maxval: A Tensor or Python value of type `dtype`, broadcastable with `shape` + (for integer types, broadcasting is not supported, so it needs to be a + scalar). The upper bound on the range of random values to generate. + Defaults to 1 if `dtype` is floating point. Pass `None` for full-range + integers. + dtype: The type of the output: `float16`, `bfloat16`, `float32`, `float64`, + `int32`, or `int64`. For unbounded uniform ints (`minval`, `maxval` both + `None`), `uint32` and `uint64` may be used. Defaults to `float32`. 
+ name: A name for the operation (optional). + alg: The RNG algorithm used to generate the random numbers. Valid choices + are `"philox"` for [the Philox + algorithm](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf), + `"threefry"` for [the ThreeFry + algorithm](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf), + and `"auto_select"` (default) for the system to automatically select an + algorithm based the device type. Values of `tf.random.Algorithm` can also + be used. Note that with `"auto_select"`, the outputs of this function may + change when it is running on a different device. + + Returns: + A tensor of the specified shape filled with random uniform values. + + Raises: + ValueError: If `dtype` is integral and only one of `minval` or `maxval` is + specified. + """ + dtype = dtypes.as_dtype(dtype) + accepted_dtypes = ( + dtypes.float16, + dtypes.bfloat16, + dtypes.float32, + dtypes.float64, + dtypes.int32, + dtypes.int64, + dtypes.uint32, + dtypes.uint64, + ) + if dtype not in accepted_dtypes: + raise ValueError( + f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are " + f"{accepted_dtypes}." + ) + if dtype.is_integer: + if (minval is None) != (maxval is None): + raise ValueError( + f"For integer `dtype` argument {dtype}, argument `minval` and " + f"`maxval` must be both None or not None. Got `minval`={minval} and " + f"`maxval`={maxval}." + ) + if minval is not None and dtype in (dtypes.uint32, dtypes.uint64): + raise ValueError( + f"Argument `dtype` got invalid value {dtype} when argument `minval` " + "is not None. Please don't use unsigned integers in this case." 
+ ) + elif maxval is None: + maxval = 1 + with ops.name_scope( + name, "stateless_random_uniform", [shape, seed, minval, maxval] + ) as name: + shape = shape_util.shape_tensor(shape) + if dtype.is_integer and minval is None: + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + result = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2( + shape, key=key, counter=counter, dtype=dtype, alg=alg, name=name + ) + else: + minval = ops.convert_to_tensor(minval, dtype=dtype, name="min") + maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max") + if dtype.is_integer: + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + result = gen_stateless_random_ops_v2.stateless_random_uniform_int_v2( + shape, + key=key, + counter=counter, + minval=minval, + maxval=maxval, + alg=alg, + name=name, + ) + else: + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + rnd = gen_stateless_random_ops_v2.stateless_random_uniform_v2( + shape, key=key, counter=counter, dtype=dtype, alg=alg + ) + result = math_ops.add(rnd * (maxval - minval), minval, name=name) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export("random.stateless_binomial") +@dispatch.add_dispatch_support +def stateless_random_binomial( + shape, seed, counts, probs, output_dtype=dtypes.int32, name=None +): + """Outputs deterministic pseudorandom values from a binomial distribution. + + The generated values follow a binomial distribution with specified count and + probability of success parameters. + + This is a stateless version of `tf.random.Generator.binomial`: if run twice + with the same seeds and shapes, it will produce the same pseudorandom numbers. + The output is consistent across multiple runs on the same hardware (and + between CPU and GPU), but may change between versions of TensorFlow or on + non-CPU/GPU hardware. + + Example: + + ```python + counts = [10., 20.] + # Probability of success. 
+ probs = [0.8] + + binomial_samples = tf.random.stateless_binomial( + shape=[2], seed=[123, 456], counts=counts, probs=probs) + + counts = ... # Shape [3, 1, 2] + probs = ... # Shape [1, 4, 2] + shape = [3, 4, 3, 4, 2] + # Sample shape will be [3, 4, 3, 4, 2] + binomial_samples = tf.random.stateless_binomial( + shape=shape, seed=[123, 456], counts=counts, probs=probs) + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + counts: Tensor. The counts of the binomial distribution. Must be + broadcastable with `probs`, and broadcastable with the rightmost + dimensions of `shape`. + probs: Tensor. The probability of success for the binomial distribution. + Must be broadcastable with `counts` and broadcastable with the rightmost + dimensions of `shape`. + output_dtype: The type of the output. Default: tf.int32 + name: A name for the operation (optional). + + Returns: + samples: A Tensor of the specified shape filled with random binomial + values. For each i, each samples[..., i] is an independent draw from + the binomial distribution on counts[i] trials with probability of + success probs[i]. 
+ """ + with ops.name_scope( + name, "stateless_random_binomial", [shape, seed, counts, probs] + ) as name: + shape = shape_util.shape_tensor(shape) + probs = ops.convert_to_tensor( + probs, dtype_hint=dtypes.float32, name="probs" + ) + counts = ops.convert_to_tensor( + counts, dtype_hint=probs.dtype, name="counts" + ) + result = gen_stateless_random_ops.stateless_random_binomial( + shape=shape, seed=seed, counts=counts, probs=probs, dtype=output_dtype + ) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export("random.stateless_gamma") +@dispatch.add_dispatch_support +def stateless_random_gamma( + shape, seed, alpha, beta=None, dtype=dtypes.float32, name=None +): + """Outputs deterministic pseudorandom values from a gamma distribution. + + The generated values follow a gamma distribution with specified concentration + (`alpha`) and inverse scale (`beta`) parameters. + + This is a stateless version of `tf.random.gamma`: if run twice with the same + seeds and shapes, it will produce the same pseudorandom numbers. The output is + consistent across multiple runs on the same hardware (and between CPU and + GPU), + but may change between versions of TensorFlow or on non-CPU/GPU hardware. + + A slight difference exists in the interpretation of the `shape` parameter + between `stateless_gamma` and `gamma`: in `gamma`, the `shape` is always + prepended to the shape of the broadcast of `alpha` with `beta`; whereas in + `stateless_gamma` the `shape` parameter must always encompass the shapes of + each of `alpha` and `beta` (which must broadcast together to match the + trailing dimensions of `shape`). + + Note: Because internal calculations are done using `float64` and casting has + `floor` semantics, we must manually map zero outcomes to the smallest + possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This + means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise + should. 
This bias can only happen for small values of `alpha`, i.e., + `alpha << 1` or large values of `beta`, i.e., `beta >> 1`. + + The samples are differentiable w.r.t. alpha and beta. + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + Example: + + ```python + samples = tf.random.stateless_gamma([10, 2], seed=[12, 34], alpha=[0.5, 1.5]) + # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents + # the samples drawn from each distribution + + samples = tf.random.stateless_gamma([7, 5, 2], seed=[12, 34], alpha=[.5, 1.5]) + # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] + # represents the 7x5 samples drawn from each of the two distributions + + alpha = tf.constant([[1.], [3.], [5.]]) + beta = tf.constant([[3., 4.]]) + samples = tf.random.stateless_gamma( + [30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta) + # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions. + + with tf.GradientTape() as tape: + tape.watch([alpha, beta]) + loss = tf.reduce_mean(tf.square(tf.random.stateless_gamma( + [30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta))) + dloss_dalpha, dloss_dbeta = tape.gradient(loss, [alpha, beta]) + # unbiased stochastic derivatives of the loss function + alpha.shape == dloss_dalpha.shape # True + beta.shape == dloss_dbeta.shape # True + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + alpha: Tensor. The concentration parameter of the gamma distribution. Must + be broadcastable with `beta`, and broadcastable with the rightmost + dimensions of `shape`. + beta: Tensor. The inverse scale parameter of the gamma distribution. Must be + broadcastable with `alpha` and broadcastable with the rightmost dimensions + of `shape`. 
+ dtype: Floating point dtype of `alpha`, `beta`, and the output. + name: A name for the operation (optional). + + Returns: + samples: A Tensor of the specified shape filled with random gamma values. + For each i, each `samples[..., i] is an independent draw from the gamma + distribution with concentration alpha[i] and scale beta[i]. + """ + with ops.name_scope( + name, "stateless_random_gamma", [shape, seed, alpha, beta] + ) as name: + shape = shape_util.shape_tensor(shape) + alpha = ops.convert_to_tensor(alpha, dtype=dtype, name="alpha") + beta = ops.convert_to_tensor( + beta if beta is not None else 1, name="beta", dtype=dtype + ) + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(alpha), array_ops.shape(beta) + ) + alpha_broadcast = array_ops.broadcast_to(alpha, broadcast_shape) + alg = "auto_select" + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + rnd = gen_stateless_random_ops_v2.stateless_random_gamma_v3( + shape, key=key, counter=counter, alg=alg, alpha=alpha_broadcast + ) + result = math_ops.maximum( + np.finfo(alpha.dtype.as_numpy_dtype).tiny, rnd / beta + ) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export("random.stateless_poisson") +@dispatch.add_dispatch_support +def stateless_random_poisson(shape, seed, lam, dtype=dtypes.int32, name=None): + """Outputs deterministic pseudorandom values from a Poisson distribution. + + The generated values follow a Poisson distribution with specified rate + parameter. + + This is a stateless version of `tf.random.poisson`: if run twice with the same + seeds and shapes, it will produce the same pseudorandom numbers. The output is + consistent across multiple runs on the same hardware, but may change between + versions of TensorFlow or on non-CPU/GPU hardware. 
+ + A slight difference exists in the interpretation of the `shape` parameter + between `stateless_poisson` and `poisson`: in `poisson`, the `shape` is always + prepended to the shape of `lam`; whereas in `stateless_poisson` the shape of + `lam` must match the trailing dimensions of `shape`. + + Example: + + ```python + samples = tf.random.stateless_poisson([10, 2], seed=[12, 34], lam=[5, 15]) + # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents + # the samples drawn from each distribution + + samples = tf.random.stateless_poisson([7, 5, 2], seed=[12, 34], lam=[5, 15]) + # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] + # represents the 7x5 samples drawn from each of the two distributions + + rate = tf.constant([[1.], [3.], [5.]]) + samples = tf.random.stateless_poisson([30, 3, 1], seed=[12, 34], lam=rate) + # samples has shape [30, 3, 1], with 30 samples each of 3x1 distributions. + ``` + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + lam: Tensor. The rate parameter "lambda" of the Poisson distribution. Shape + must match the rightmost dimensions of `shape`. + dtype: Dtype of the samples (int or float dtypes are permissible, as samples + are discrete). Default: int32. + name: A name for the operation (optional). + + Returns: + samples: A Tensor of the specified shape filled with random Poisson values. + For each i, each `samples[..., i]` is an independent draw from the Poisson + distribution with rate `lam[i]`. 
+ """ + with ops.name_scope( + name, "stateless_random_poisson", [shape, seed, lam] + ) as name: + shape = shape_util.shape_tensor(shape) + result = gen_stateless_random_ops.stateless_random_poisson( + shape, seed=seed, lam=lam, dtype=dtype + ) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export("random.stateless_normal") +@dispatch.add_dispatch_support +def stateless_random_normal( + shape, + seed, + mean=0.0, + stddev=1.0, + dtype=dtypes.float32, + name=None, + alg="auto_select", +): + """Outputs deterministic pseudorandom values from a normal distribution. + + This is a stateless version of `tf.random.normal`: if run twice with the + same seeds and shapes, it will produce the same pseudorandom numbers. The + output is consistent across multiple runs on the same hardware (and between + CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU + hardware. + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal + distribution. + stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation + of the normal distribution. + dtype: The float type of the output: `float16`, `bfloat16`, `float32`, + `float64`. Defaults to `float32`. + name: A name for the operation (optional). + alg: The RNG algorithm used to generate the random numbers. See + `tf.random.stateless_uniform` for a detailed explanation. + + Returns: + A tensor of the specified shape filled with random normal values. 
+ """ + with ops.name_scope( + name, "stateless_random_normal", [shape, seed, mean, stddev] + ) as name: + shape = shape_util.shape_tensor(shape) + mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") + stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + rnd = gen_stateless_random_ops_v2.stateless_random_normal_v2( + shape, key=key, counter=counter, dtype=dtype, alg=alg + ) + result = math_ops.add(rnd * stddev, mean, name=name) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export("random.stateless_truncated_normal") +@dispatch.add_dispatch_support +def stateless_truncated_normal( + shape, + seed, + mean=0.0, + stddev=1.0, + dtype=dtypes.float32, + name=None, + alg="auto_select", +): + """Outputs deterministic pseudorandom values, truncated normally distributed. + + This is a stateless version of `tf.random.truncated_normal`: if run twice with + the same seeds and shapes, it will produce the same pseudorandom numbers. The + output is consistent across multiple runs on the same hardware (and between + CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU + hardware. + + The generated values follow a normal distribution with specified mean and + standard deviation, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + Args: + shape: A 1-D integer Tensor or Python array. The shape of the output tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + mean: A 0-D Tensor or Python value of type `dtype`. The mean of the + truncated normal distribution. + stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation + of the normal distribution, before truncation. + dtype: The type of the output. + name: A name for the operation (optional). 
+ alg: The RNG algorithm used to generate the random numbers. See + `tf.random.stateless_uniform` for a detailed explanation. + + Returns: + A tensor of the specified shape filled with random truncated normal values. + """ + with ops.name_scope( + name, "stateless_truncated_normal", [shape, seed, mean, stddev] + ) as name: + shape = shape_util.shape_tensor(shape) + mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") + stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") + key, counter, alg = random_ops_util.get_key_counter_alg(seed, alg) + rnd = gen_stateless_random_ops_v2.stateless_truncated_normal_v2( + shape, key=key, counter=counter, dtype=dtype, alg=alg + ) + result = math_ops.add(rnd * stddev, mean, name=name) + shape_util.maybe_set_static_shape(result, shape) + return result + + +@tf_export(v1=["random.stateless_multinomial"]) +@dispatch.add_dispatch_support +@deprecation.deprecated( + date=None, instructions="Use `tf.random.stateless_categorical` instead." +) +def stateless_multinomial( + logits, num_samples, seed, output_dtype=dtypes.int64, name=None +): + """Draws deterministic pseudorandom samples from a multinomial distribution. + + This is a stateless version of `tf.random.categorical`: if run twice with the + same seeds and shapes, it will produce the same pseudorandom numbers. The + output is consistent across multiple runs on the same hardware (and between + CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU + hardware. + + Example: + + ```python + # samples has shape [1, 5], where each value is either 0 or 1 with equal + # probability. + samples = tf.random.stateless_categorical( + tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) + ``` + + Args: + logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + :]` represents the unnormalized log-probabilities for all classes. + num_samples: 0-D. Number of independent samples to draw for each row slice. 
+ seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + output_dtype: The integer type of the output: `int32` or `int64`. Defaults + to `int64`. + name: Optional name for the operation. + + Returns: + The drawn samples of shape `[batch_size, num_samples]`. + """ + with ops.name_scope(name, "stateless_multinomial", [logits, seed]): + return stateless_multinomial_categorical_impl( + logits, num_samples, output_dtype, seed + ) + + +@tf_export("random.stateless_categorical") +@dispatch.add_dispatch_support +def stateless_categorical( + logits, num_samples, seed, dtype=dtypes.int64, name=None +): + """Draws deterministic pseudorandom samples from a categorical distribution. + + This is a stateless version of `tf.categorical`: if run twice with the + same seeds and shapes, it will produce the same pseudorandom numbers. The + output is consistent across multiple runs on the same hardware (and between + CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU + hardware. + + + Example: + + ```python + # samples has shape [1, 5], where each value is either 0 or 1 with equal + # probability. + samples = tf.random.stateless_categorical( + tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) + ``` + + Args: + logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + :]` represents the unnormalized log-probabilities for all classes. + num_samples: 0-D. Number of independent samples to draw for each row slice. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) + dtype: The integer type of the output: `int32` or `int64`. Defaults to + `int64`. + name: Optional name for the operation. + + Returns: + The drawn samples of shape `[batch_size, num_samples]`. 
+ """ + with ops.name_scope(name, "stateless_categorical", [logits, seed]): + return stateless_multinomial_categorical_impl( + logits, num_samples, dtype, seed + ) + + +def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed): + """Implementation for stateless multinomial/categorical ops (v1/v2).""" + logits = ops.convert_to_tensor(logits, name="logits") + dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64 + accepted_dtypes = (dtypes.int32, dtypes.int64) + if dtype not in accepted_dtypes: + raise ValueError( + f"Argument `dtype` got invalid value {dtype}. Accepted dtypes are " + f"{accepted_dtypes}." + ) + return gen_stateless_random_ops.stateless_multinomial( + logits, num_samples, seed, output_dtype=dtype + ) + + +@dispatch.add_dispatch_support +@tf_export("random.stateless_parameterized_truncated_normal") +def stateless_parameterized_truncated_normal( + shape, seed, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, name=None +): + """Outputs random values from a truncated normal distribution. + + The generated values follow a normal distribution with specified mean and + standard deviation, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + + Examples: + + Sample from a Truncated normal, with deferring shape parameters that + broadcast. + + >>> means = 0. + >>> stddevs = tf.math.exp(tf.random.uniform(shape=[2, 3])) + >>> minvals = [-1., -2., -1000.] + >>> maxvals = [[10000.], [1.]] + >>> y = tf.random.stateless_parameterized_truncated_normal( + ... shape=[10, 2, 3], seed=[7, 17], + ... means=means, stddevs=stddevs, minvals=minvals, maxvals=maxvals) + >>> y.shape + TensorShape([10, 2, 3]) + + Args: + shape: A 1-D integer `Tensor` or Python array. The shape of the output + tensor. + seed: A shape [2] Tensor, the seed to the random number generator. Must have + dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) 
+ means: A `Tensor` or Python value of type `dtype`. The mean of the truncated + normal distribution. This must broadcast with `stddevs`, `minvals` and + `maxvals`, and the broadcasted shape must be dominated by `shape`. + stddevs: A `Tensor` or Python value of type `dtype`. The standard deviation + of the truncated normal distribution. This must broadcast with `means`, + `minvals` and `maxvals`, and the broadcasted shape must be dominated by + `shape`. + minvals: A `Tensor` or Python value of type `dtype`. The minimum value of + the truncated normal distribution. This must broadcast with `means`, + `stddevs` and `maxvals`, and the broadcasted shape must be dominated by + `shape`. + maxvals: A `Tensor` or Python value of type `dtype`. The maximum value of + the truncated normal distribution. This must broadcast with `means`, + `stddevs` and `minvals`, and the broadcasted shape must be dominated by + `shape`. + name: A name for the operation (optional). + + Returns: + A tensor of the specified shape filled with random truncated normal values. 
+ """ + with ops.name_scope( + name, + "stateless_parameterized_truncated_normal", + [shape, means, stddevs, minvals, maxvals], + ) as name: + shape_tensor = shape_util.shape_tensor(shape) + means_tensor = ops.convert_to_tensor(means, name="means") + stddevs_tensor = ops.convert_to_tensor(stddevs, name="stddevs") + minvals_tensor = ops.convert_to_tensor(minvals, name="minvals") + maxvals_tensor = ops.convert_to_tensor(maxvals, name="maxvals") + rnd = gen_stateless_random_ops.stateless_parameterized_truncated_normal( + shape_tensor, + seed, + means_tensor, + stddevs_tensor, + minvals_tensor, + maxvals_tensor, + ) + shape_util.maybe_set_static_shape(rnd, shape) + return rnd diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/summary_ops_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/summary_ops_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..761f42885ada5961dd5eaf2e15856f947852e64c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/summary_ops_v2.py @@ -0,0 +1,1466 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Operations to emit summaries.""" + +import abc +import collections +import functools +import os +import re +import threading + +from tensorflow.core.framework import graph_pb2 +from tensorflow.core.framework import summary_pb2 +from tensorflow.core.protobuf import config_pb2 +from tensorflow.dtensor.python import api as dtensor_api +from tensorflow.dtensor.python import layout as layout_lib +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import smart_cond +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_resource_variable_ops +from tensorflow.python.ops import gen_summary_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import summary_op_util +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.profiler import profiler_v2 as _profiler +from tensorflow.python.trackable import resource +from tensorflow.python.training import training_util +from tensorflow.python.util import deprecation +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util.tf_export import tf_export + + +# Name for graph collection of summary writer init ops, which is only exposed +# as a legacy API for tf.contrib.summary in TF 1.x. +_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2" + + +class _SummaryState(threading.local): + + def __init__(self): + super(_SummaryState, self).__init__() + self.is_recording = None + # TODO(slebedev): why a separate flag for DS and is it on by default? 
+ self.is_recording_distribution_strategy = True + self.writer = None + self.step = None + + +_summary_state = _SummaryState() + + +class _SummaryContextManager: + """Context manager to implement SummaryWriter.as_default().""" + # Note: this is a class so that it's possible to implement `set_as_default()` + # simply via `as_default().__enter__()`. We can't do that with @contextmanager + # because the `finally` block will be executed when the generator is GCed. + + def __init__(self, writer, step=None): + self._writer = writer + self._step = step + self._old_writer = None + self._old_step = None + + def __enter__(self): + self._old_writer = _summary_state.writer + _summary_state.writer = self._writer + if self._step is not None: + self._old_step = _summary_state.step + _summary_state.step = self._step + return self._writer + + def __exit__(self, *exc): + # Flushes the summary writer in eager mode or in graph functions, but + # not in legacy graph mode (you're on your own there). + _summary_state.writer.flush() + _summary_state.writer = self._old_writer + if self._step is not None: + _summary_state.step = self._old_step + return False + + +def _should_record_summaries_internal(default_state): + """Returns boolean Tensor if summaries should/shouldn't be recorded. + + Now the summary condition is decided by logical "and" of below conditions: + First, summary writer must be set. Given this constraint is met, + ctx.summary_recording and ctx.summary_recording_distribution_strategy. + The former one is usually set by user, and the latter one is controlled + by DistributionStrategy (tf.distribute.ReplicaContext). + + Args: + default_state: can be True or False. The default summary behavior when + summary writer is set and the user does not specify + ctx.summary_recording and ctx.summary_recording_distribution_strategy + is True. 
+ """ + if _summary_state.writer is None: + return constant_op.constant(False) + + if not callable(_summary_state.is_recording): + static_cond = tensor_util.constant_value(_summary_state.is_recording) + if static_cond is not None and not static_cond: + return constant_op.constant(False) + + resolve = lambda x: x() if callable(x) else x + cond_distributed = resolve(_summary_state.is_recording_distribution_strategy) + cond = resolve(_summary_state.is_recording) + if cond is None: + cond = default_state + return math_ops.logical_and(cond_distributed, cond) + + +@tf_export("summary.should_record_summaries", v1=[]) +def should_record_summaries(): + """Returns boolean Tensor which is True if summaries will be recorded. + + If no default summary writer is currently registered, this always returns + False. Otherwise, this reflects the recording condition has been set via + `tf.summary.record_if()` (except that it may return False for some replicas + when using `tf.distribute.Strategy`). If no recording condition is active, + it defaults to True. + """ + return _should_record_summaries_internal(default_state=True) + + +# Legacy symbol used by tf.contrib.summary.should_record_summaries. +def _legacy_contrib_should_record_summaries(): + """Returns boolean Tensor which is true if summaries should be recorded.""" + return _should_record_summaries_internal(default_state=False) + + +@tf_export("summary.record_if", v1=[]) +@tf_contextlib.contextmanager +def record_if(condition): + """Sets summary recording on or off per the provided boolean value. + + The provided value can be a python boolean, a scalar boolean Tensor, or + or a callable providing such a value; if a callable is passed it will be + invoked on-demand to determine whether summary writing will occur. 
Note that + when calling record_if() in an eager mode context, if you intend to provide a + varying condition like `step % 100 == 0`, you must wrap this in a + callable to avoid immediate eager evaluation of the condition. In particular, + using a callable is the only way to have your condition evaluated as part of + the traced body of an @tf.function that is invoked from within the + `record_if()` context. + + Args: + condition: can be True, False, a bool Tensor, or a callable providing such. + + Yields: + Returns a context manager that sets this value on enter and restores the + previous value on exit. + """ + old = _summary_state.is_recording + try: + _summary_state.is_recording = condition + yield + finally: + _summary_state.is_recording = old + + +def has_default_writer(): + """Returns a boolean indicating whether a default summary writer exists.""" + return _summary_state.writer is not None + + +# TODO(apassos) consider how to handle local step here. +def record_summaries_every_n_global_steps(n, global_step=None): + """Sets the should_record_summaries Tensor to true if global_step % n == 0.""" + if global_step is None: + global_step = training_util.get_or_create_global_step() + with ops.device("cpu:0"): + should = lambda: math_ops.equal(global_step % n, 0) + if not context.executing_eagerly(): + should = should() + return record_if(should) + + +def always_record_summaries(): + """Sets the should_record_summaries Tensor to always true.""" + return record_if(True) + + +def never_record_summaries(): + """Sets the should_record_summaries Tensor to always false.""" + return record_if(False) + + +@tf_export("summary.experimental.get_step", v1=[]) +def get_step(): + """Returns the default summary step for the current thread. + + Returns: + The step set by `tf.summary.experimental.set_step()` if one has been set, + otherwise None. 
+ """ + return _summary_state.step + + +@tf_export("summary.experimental.set_step", v1=[]) +def set_step(step): + """Sets the default summary step for the current thread. + + For convenience, this function sets a default value for the `step` parameter + used in summary-writing functions elsewhere in the API so that it need not + be explicitly passed in every such invocation. The value can be a constant + or a variable, and can be retrieved via `tf.summary.experimental.get_step()`. + + Note: when using this with @tf.functions, the step value will be captured at + the time the function is traced, so changes to the step outside the function + will not be reflected inside the function unless using a `tf.Variable` step. + + Args: + step: An `int64`-castable default step value, or None to unset. + """ + _summary_state.step = step + + +@tf_export("summary.SummaryWriter", v1=[]) +class SummaryWriter(metaclass=abc.ABCMeta): + """Interface representing a stateful summary writer object.""" + + def set_as_default(self, step=None): + """Enables this summary writer for the current thread. + + For convenience, if `step` is not None, this function also sets a default + value for the `step` parameter used in summary-writing functions elsewhere + in the API so that it need not be explicitly passed in every such + invocation. The value can be a constant or a variable. + + Note: when setting `step` in a @tf.function, the step value will be + captured at the time the function is traced, so changes to the step outside + the function will not be reflected inside the function unless using + a `tf.Variable` step. + + Args: + step: An `int64`-castable default step value, or `None`. When not `None`, + the current step is modified to the given value. When `None`, the + current step is not modified. + """ + self.as_default(step).__enter__() + + def as_default(self, step=None): + """Returns a context manager that enables summary writing. 
+ + For convenience, if `step` is not None, this function also sets a default + value for the `step` parameter used in summary-writing functions elsewhere + in the API so that it need not be explicitly passed in every such + invocation. The value can be a constant or a variable. + + Note: when setting `step` in a @tf.function, the step value will be + captured at the time the function is traced, so changes to the step outside + the function will not be reflected inside the function unless using + a `tf.Variable` step. + + For example, `step` can be used as: + + ```python + with writer_a.as_default(step=10): + tf.summary.scalar(tag, value) # Logged to writer_a with step 10 + with writer_b.as_default(step=20): + tf.summary.scalar(tag, value) # Logged to writer_b with step 20 + tf.summary.scalar(tag, value) # Logged to writer_a with step 10 + ``` + + Args: + step: An `int64`-castable default step value, or `None`. When not `None`, + the current step is captured, replaced by a given one, and the original + one is restored when the context manager exits. When `None`, the current + step is not modified (and not restored when the context manager exits). + + Returns: + The context manager. 
+ """ + return _SummaryContextManager(self, step) + + def init(self): + """Initializes the summary writer.""" + raise NotImplementedError() + + def flush(self): + """Flushes any buffered data.""" + raise NotImplementedError() + + def close(self): + """Flushes and closes the summary writer.""" + raise NotImplementedError() + + +class _ResourceSummaryWriter(SummaryWriter): + """Implementation of SummaryWriter using a SummaryWriterInterface resource.""" + + def __init__(self, create_fn, init_op_fn, mesh=None): + if mesh is not None: + with dtensor_api.default_mesh(mesh.host_mesh()): + self._resource = create_fn() + self._init_op = init_op_fn(self._resource) + else: + self._resource = create_fn() + self._init_op = init_op_fn(self._resource) + + self._closed = False + if context.executing_eagerly(): + self._set_up_resource_deleter() + else: + ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op) + + self._mesh = mesh + + # Extension point to be overridden by subclasses to customize deletion. 
+ + def _set_up_resource_deleter(self): + self._resource_deleter = resource_variable_ops.EagerResourceDeleter( + handle=self._resource, handle_device="cpu:0") + + def set_as_default(self, step=None): + """See `SummaryWriter.set_as_default`.""" + if context.executing_eagerly() and self._closed: + raise RuntimeError(f"SummaryWriter {self!r} is already closed") + super().set_as_default(step) + + def as_default(self, step=None): + """See `SummaryWriter.as_default`.""" + if context.executing_eagerly() and self._closed: + raise RuntimeError(f"SummaryWriter {self!r} is already closed") + return super().as_default(step) + + def init(self): + """See `SummaryWriter.init`.""" + if context.executing_eagerly() and self._closed: + raise RuntimeError(f"SummaryWriter {self!r} is already closed") + return self._init_op + + def flush(self): + """See `SummaryWriter.flush`.""" + if context.executing_eagerly() and self._closed: + return + with ops.device("cpu:0"): + return gen_summary_ops.flush_summary_writer(self._resource) + + def close(self): + """See `SummaryWriter.close`.""" + if context.executing_eagerly() and self._closed: + return + try: + with ops.control_dependencies([self.flush()]): + with ops.device("cpu:0"): + return gen_summary_ops.close_summary_writer(self._resource) + finally: + if context.executing_eagerly(): + self._closed = True + + +class _MultiMetaclass( + type(_ResourceSummaryWriter), type(resource.TrackableResource)): + pass + + +class _TrackableResourceSummaryWriter( + _ResourceSummaryWriter, + resource.TrackableResource, + metaclass=_MultiMetaclass): + """A `_ResourceSummaryWriter` subclass that implements `TrackableResource`.""" + + def __init__(self, create_fn, init_op_fn, mesh=None): + # Resolve multiple inheritance via explicit calls to __init__() on parents. 
+ resource.TrackableResource.__init__(self, device="/CPU:0") + self._create_fn = create_fn + self._init_op_fn = init_op_fn + # Pass .resource_handle into _ResourceSummaryWriter parent class rather than + # create_fn, to ensure it accesses the resource handle only through the + # cached property so that everything is using a single resource handle. + _ResourceSummaryWriter.__init__( + self, + create_fn=lambda: self.resource_handle, + init_op_fn=init_op_fn, + mesh=mesh, + ) + + # Override for TrackableResource implementation. + def _create_resource(self): + return self._create_fn() + + # Override for TrackableResource implementation. + def _initialize(self): + return self._init_op_fn(self.resource_handle) + + # Override for TrackableResource implementation. + def _destroy_resource(self): + gen_resource_variable_ops.destroy_resource_op( + self.resource_handle, ignore_lookup_error=True) + + def _set_up_resource_deleter(self): + # Override to suppress ResourceSummaryWriter implementation; we don't need + # the deleter since TrackableResource.__del__() handles it for us. 
+ pass + + +class _LegacyResourceSummaryWriter(SummaryWriter): + """Legacy resource-backed SummaryWriter for tf.contrib.summary.""" + + def __init__(self, resource, init_op_fn): + self._resource = resource + self._init_op_fn = init_op_fn + init_op = self.init() + if context.executing_eagerly(): + self._resource_deleter = resource_variable_ops.EagerResourceDeleter( + handle=self._resource, handle_device="cpu:0") + else: + ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op) + + def init(self): + """See `SummaryWriter.init`.""" + return self._init_op_fn(self._resource) + + def flush(self): + """See `SummaryWriter.flush`.""" + with ops.device("cpu:0"): + return gen_summary_ops.flush_summary_writer(self._resource) + + def close(self): + """See `SummaryWriter.close`.""" + with ops.control_dependencies([self.flush()]): + with ops.device("cpu:0"): + return gen_summary_ops.close_summary_writer(self._resource) + + +class _NoopSummaryWriter(SummaryWriter): + """A summary writer that does nothing, for create_noop_writer().""" + + def set_as_default(self, step=None): + pass + + @tf_contextlib.contextmanager + def as_default(self, step=None): + yield + + def init(self): + pass + + def flush(self): + pass + + def close(self): + pass + + +@tf_export(v1=["summary.initialize"]) +def initialize( + graph=None, # pylint: disable=redefined-outer-name + session=None): + """Initializes summary writing for graph execution mode. + + This operation is a no-op when executing eagerly. + + This helper method provides a higher-level alternative to using + `tf.contrib.summary.summary_writer_initializer_op` and + `tf.contrib.summary.graph`. + + Most users will also want to call `tf.compat.v1.train.create_global_step` + which can happen before or after this function is called. + + Args: + graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer. + This function will not write the default graph by default. 
When + writing to an event log file, the associated step will be zero. + session: So this method can call `tf.Session.run`. This defaults + to `tf.compat.v1.get_default_session`. + + Raises: + RuntimeError: If the current thread has no default + `tf.contrib.summary.SummaryWriter`. + ValueError: If session wasn't passed and no default session. + """ + if context.executing_eagerly(): + return + if _summary_state.writer is None: + raise RuntimeError("No default tf.contrib.summary.SummaryWriter found") + if session is None: + session = ops.get_default_session() + if session is None: + raise ValueError("Argument `session must be passed if no default " + "session exists") + session.run(summary_writer_initializer_op()) + if graph is not None: + data = _serialize_graph(graph) + x = array_ops.placeholder(dtypes.string) + session.run(graph_v1(x, 0), feed_dict={x: data}) + + +@tf_export("summary.create_file_writer", v1=[]) +def create_file_writer_v2( + logdir, + max_queue=None, + flush_millis=None, + filename_suffix=None, + name=None, + experimental_trackable=False, + experimental_mesh=None, +): + """Creates a summary file writer for the given log directory. + + Args: + logdir: a string specifying the directory in which to write an event file. + max_queue: the largest number of summaries to keep in a queue; will flush + once the queue gets bigger than this. Defaults to 10. + flush_millis: the largest interval between flushes. Defaults to 120,000. + filename_suffix: optional suffix for the event file name. Defaults to `.v2`. + name: a name for the op that creates the writer. + experimental_trackable: a boolean that controls whether the returned writer + will be a `TrackableResource`, which makes it compatible with SavedModel + when used as a `tf.Module` property. + experimental_mesh: a `tf.experimental.dtensor.Mesh` instance. When running + with DTensor, the mesh (experimental_mesh.host_mesh()) will be used for + bringing all the DTensor logging from accelerator to CPU mesh. 
+ + Returns: + A SummaryWriter object. + """ + # TODO(b/291655717): Revisit the experimental_mesh once we have soft placment. + if logdir is None: + raise ValueError("Argument `logdir` cannot be None") + inside_function = ops.inside_function() + with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"): + # Run init inside an init_scope() to hoist it out of tf.functions. + with ops.init_scope(): + if context.executing_eagerly(): + _check_create_file_writer_args( + inside_function, + logdir=logdir, + max_queue=max_queue, + flush_millis=flush_millis, + filename_suffix=filename_suffix) + logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string) + if max_queue is None: + max_queue = constant_op.constant(10) + if flush_millis is None: + flush_millis = constant_op.constant(2 * 60 * 1000) + if filename_suffix is None: + filename_suffix = constant_op.constant(".v2") + + def create_fn(): + # Use unique shared_name to prevent resource sharing in eager mode, but + # otherwise use a fixed shared_name to allow SavedModel TF 1.x loading. + if context.executing_eagerly(): + shared_name = context.anonymous_name() + else: + shared_name = ops.name_from_scope_name(scope) # pylint: disable=protected-access + return gen_summary_ops.summary_writer( + shared_name=shared_name, name=name) + + init_op_fn = functools.partial( + gen_summary_ops.create_summary_file_writer, + logdir=logdir, + max_queue=max_queue, + flush_millis=flush_millis, + filename_suffix=filename_suffix) + if experimental_trackable: + return _TrackableResourceSummaryWriter( + create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh + ) + else: + return _ResourceSummaryWriter( + create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh + ) + + +def create_file_writer(logdir, + max_queue=None, + flush_millis=None, + filename_suffix=None, + name=None): + """Creates a summary file writer in the current context under the given name. + + Args: + logdir: a string, or None. 
If a string, creates a summary file writer + which writes to the directory named by the string. If None, returns + a mock object which acts like a summary writer but does nothing, + useful to use as a context manager. + max_queue: the largest number of summaries to keep in a queue; will + flush once the queue gets bigger than this. Defaults to 10. + flush_millis: the largest interval between flushes. Defaults to 120,000. + filename_suffix: optional suffix for the event file name. Defaults to `.v2`. + name: Shared name for this SummaryWriter resource stored to default + Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a + summary writer resource with this shared name already exists, the returned + SummaryWriter wraps that resource and the other arguments have no effect. + + Returns: + Either a summary writer or an empty object which can be used as a + summary writer. + """ + if logdir is None: + return _NoopSummaryWriter() + logdir = str(logdir) + with ops.device("cpu:0"): + if max_queue is None: + max_queue = constant_op.constant(10) + if flush_millis is None: + flush_millis = constant_op.constant(2 * 60 * 1000) + if filename_suffix is None: + filename_suffix = constant_op.constant(".v2") + if name is None: + name = "logdir:" + logdir + resource = gen_summary_ops.summary_writer(shared_name=name) + return _LegacyResourceSummaryWriter( + resource=resource, + init_op_fn=functools.partial( + gen_summary_ops.create_summary_file_writer, + logdir=logdir, + max_queue=max_queue, + flush_millis=flush_millis, + filename_suffix=filename_suffix)) + + +@tf_export("summary.create_noop_writer", v1=[]) +def create_noop_writer(): + """Returns a summary writer that does nothing. + + This is useful as a placeholder in code that expects a context manager. 
+ """ + return _NoopSummaryWriter() + + +def _cleanse_string(name, pattern, value): + if isinstance(value, str) and pattern.search(value) is None: + raise ValueError(f"{name} ({value}) must match {pattern.pattern}") + return ops.convert_to_tensor(value, dtypes.string) + + +def _nothing(): + """Convenient else branch for when summaries do not record.""" + return constant_op.constant(False) + + +@tf_export(v1=["summary.all_v2_summary_ops"]) +def all_v2_summary_ops(): + """Returns all V2-style summary ops defined in the current default graph. + + This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except + for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but + does *not* include TF 1.x tf.summary ops. + + Returns: + List of summary ops, or None if called under eager execution. + """ + if context.executing_eagerly(): + return None + return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access + + +def summary_writer_initializer_op(): + """Graph-mode only. Returns the list of ops to create all summary writers. + + Returns: + The initializer ops. + + Raises: + RuntimeError: If in Eager mode. + """ + if context.executing_eagerly(): + raise RuntimeError( + "tf.contrib.summary.summary_writer_initializer_op is only " + "supported in graph mode.") + return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME) + + +_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]") + + +@tf_export("summary.experimental.summary_scope", v1=[]) +@tf_contextlib.contextmanager +def summary_scope(name, default_name="summary", values=None): + """Experimental context manager for use when defining a custom summary op. + + This behaves similarly to `tf.name_scope`, except that it returns a generated + summary tag in addition to the scope name. 
The tag is structurally similar to + the scope name - derived from the user-provided name, prefixed with enclosing + name scopes if any - but we relax the constraint that it be uniquified, as + well as the character set limitation (so the user-provided name can contain + characters not legal for scope names; in the scope name these are removed). + + This makes the summary tag more predictable and consistent for the user. + + For example, to define a new summary op called `my_op`: + + ```python + def my_op(name, my_value, step): + with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope): + my_value = tf.convert_to_tensor(my_value) + return tf.summary.write(tag, my_value, step=step) + ``` + + Args: + name: string name for the summary. + default_name: Optional; if provided, used as default name of the summary. + values: Optional; passed as `values` parameter to name_scope. + + Yields: + A tuple `(tag, scope)` as described above. + """ + name = name or default_name + current_scope = ops.get_name_scope() + tag = current_scope + "/" + name if current_scope else name + # Strip illegal characters from the scope name, and if that leaves nothing, + # use None instead so we pick up the default name. + name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None + with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope: + yield tag, scope + + +@tf_export("summary.write", v1=[]) +def write(tag, tensor, step=None, metadata=None, name=None): + """Writes a generic summary to the default SummaryWriter if one exists. + + This exists primarily to support the definition of type-specific summary ops + like scalar() and image(), and is not intended for direct use unless defining + a new type-specific summary op. + + Args: + tag: string tag used to identify the summary (e.g. in TensorBoard), usually + generated with `tf.summary.summary_scope` + tensor: the Tensor holding the summary data to write or a callable that + returns this Tensor. 
If a callable is passed, it will only be called when + a default SummaryWriter exists and the recording condition specified by + `record_if()` is met. + step: Explicit `int64`-castable monotonic step value for this summary. If + omitted, this defaults to `tf.summary.experimental.get_step()`, which must + not be None. + metadata: Optional SummaryMetadata, as a proto or serialized bytes + name: Optional string name for this op. + + Returns: + True on success, or false if no summary was written because no default + summary writer was available. + + Raises: + ValueError: if a default writer exists, but no step was provided and + `tf.summary.experimental.get_step()` is None. + """ + with ops.name_scope(name, "write_summary") as scope: + if _summary_state.writer is None: + return constant_op.constant(False) + if step is None: + step = get_step() + if metadata is None: + serialized_metadata = b"" + elif hasattr(metadata, "SerializeToString"): + serialized_metadata = metadata.SerializeToString() + else: + serialized_metadata = metadata + + def record(): + """Record the actual summary and return True.""" + if step is None: + raise ValueError("No step set. Please specify one either through the " + "`step` argument or through " + "tf.summary.experimental.set_step()") + + # Note the identity to move the tensor to the CPU. + with ops.device("cpu:0"): + summary_tensor = tensor() if callable(tensor) else array_ops.identity( + tensor) + # For DTensor, the device scope above doesn't work, we need to + # explicitly copy the resource tensor to host mesh, which is a cpu + # mesh. 
+ writer = _summary_state.writer + summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor) + step_value = _maybe_convert_tensor_to_dtensor(writer, step) + + write_summary_op = gen_summary_ops.write_summary( + writer._resource, # pylint: disable=protected-access + step_value, + summary_value, + tag, + serialized_metadata, + name=scope, + ) + with ops.control_dependencies([write_summary_op]): + return constant_op.constant(True) + + op = smart_cond.smart_cond( + should_record_summaries(), record, _nothing, name="summary_cond") + if not context.executing_eagerly(): + ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access + return op + + +@tf_export("summary.experimental.write_raw_pb", v1=[]) +def write_raw_pb(tensor, step=None, name=None): + """Writes a summary using raw `tf.compat.v1.Summary` protocol buffers. + + Experimental: this exists to support the usage of V1-style manual summary + writing (via the construction of a `tf.compat.v1.Summary` protocol buffer) + with the V2 summary writing API. + + Args: + tensor: the string Tensor holding one or more serialized `Summary` protobufs + step: Explicit `int64`-castable monotonic step value for this summary. If + omitted, this defaults to `tf.summary.experimental.get_step()`, which must + not be None. + name: Optional string name for this op. + + Returns: + True on success, or false if no summary was written because no default + summary writer was available. + + Raises: + ValueError: if a default writer exists, but no step was provided and + `tf.summary.experimental.get_step()` is None. + """ + with ops.name_scope(name, "write_raw_pb") as scope: + if _summary_state.writer is None: + return constant_op.constant(False) + if step is None: + step = get_step() + if step is None: + raise ValueError("No step set. 
Please specify one either through the " + "`step` argument or through " + "tf.summary.experimental.set_step()") + + def record(): + """Record the actual summary and return True.""" + # Note the identity to move the tensor to the CPU. + with ops.device("cpu:0"): + raw_summary_op = gen_summary_ops.write_raw_proto_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + step, + array_ops.identity(tensor), + name=scope) + with ops.control_dependencies([raw_summary_op]): + return constant_op.constant(True) + + with ops.device("cpu:0"): + op = smart_cond.smart_cond( + should_record_summaries(), record, _nothing, name="summary_cond") + if not context.executing_eagerly(): + ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access + return op + + +def summary_writer_function(name, tensor, function, family=None): + """Helper function to write summaries. + + Args: + name: name of the summary + tensor: main tensor to form the summary + function: function taking a tag and a scope which writes the summary + family: optional, the summary's family + + Returns: + The result of writing the summary. + """ + name_scope = ops.get_name_scope() + if name_scope: + # Add a slash to allow reentering the name scope. 
+ name_scope += "/" + def record(): + with ops.name_scope(name_scope), summary_op_util.summary_scope( + name, family, values=[tensor]) as (tag, scope): + with ops.control_dependencies([function(tag, scope)]): + return constant_op.constant(True) + + if _summary_state.writer is None: + return control_flow_ops.no_op() + with ops.device("cpu:0"): + op = smart_cond.smart_cond( + _legacy_contrib_should_record_summaries(), record, _nothing, name="") + if not context.executing_eagerly(): + ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access + return op + + +def generic(name, tensor, metadata=None, family=None, step=None): + """Writes a tensor summary if possible.""" + + def function(tag, scope): + if metadata is None: + serialized_metadata = constant_op.constant("") + elif hasattr(metadata, "SerializeToString"): + serialized_metadata = constant_op.constant(metadata.SerializeToString()) + else: + serialized_metadata = metadata + # Note the identity to move the tensor to the CPU. + return gen_summary_ops.write_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + _choose_step(step), + array_ops.identity(tensor), + tag, + serialized_metadata, + name=scope) + return summary_writer_function(name, tensor, function, family=family) + + +def scalar(name, tensor, family=None, step=None): + """Writes a scalar summary if possible. + + Unlike `tf.contrib.summary.generic` this op may change the dtype + depending on the writer, for both practical and efficiency concerns. + + Args: + name: An arbitrary name for this summary. + tensor: A `tf.Tensor` Must be one of the following types: + `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, + `int8`, `uint16`, `half`, `uint32`, `uint64`. + family: Optional, the summary's family. + step: The `int64` monotonic step variable, which defaults + to `tf.compat.v1.train.get_global_step`. 
+ + Returns: + The created `tf.Operation` or a `tf.no_op` if summary writing has + not been enabled for this context. + """ + + def function(tag, scope): + # Note the identity to move the tensor to the CPU. + return gen_summary_ops.write_scalar_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + _choose_step(step), + tag, + array_ops.identity(tensor), + name=scope) + + return summary_writer_function(name, tensor, function, family=family) + + +def histogram(name, tensor, family=None, step=None): + """Writes a histogram summary if possible.""" + + def function(tag, scope): + # Note the identity to move the tensor to the CPU. + return gen_summary_ops.write_histogram_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + _choose_step(step), + tag, + array_ops.identity(tensor), + name=scope) + + return summary_writer_function(name, tensor, function, family=family) + + +def image(name, tensor, bad_color=None, max_images=3, family=None, step=None): + """Writes an image summary if possible.""" + + def function(tag, scope): + bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8) + if bad_color is None else bad_color) + # Note the identity to move the tensor to the CPU. + return gen_summary_ops.write_image_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + _choose_step(step), + tag, + array_ops.identity(tensor), + bad_color_, + max_images, + name=scope) + + return summary_writer_function(name, tensor, function, family=family) + + +def audio(name, tensor, sample_rate, max_outputs, family=None, step=None): + """Writes an audio summary if possible.""" + + def function(tag, scope): + # Note the identity to move the tensor to the CPU. 
+ return gen_summary_ops.write_audio_summary( + _summary_state.writer._resource, # pylint: disable=protected-access + _choose_step(step), + tag, + array_ops.identity(tensor), + sample_rate=sample_rate, + max_outputs=max_outputs, + name=scope) + + return summary_writer_function(name, tensor, function, family=family) + + +def graph_v1(param, step=None, name=None): + """Writes a TensorFlow graph to the summary interface. + + The graph summary is, strictly speaking, not a summary. Conditions + like `tf.summary.should_record_summaries` do not apply. Only + a single graph can be associated with a particular run. If multiple + graphs are written, then only the last one will be considered by + TensorBoard. + + When not using eager execution mode, the user should consider passing + the `graph` parameter to `tf.compat.v1.summary.initialize` instead of + calling this function. Otherwise special care needs to be taken when + using the graph to record the graph. + + Args: + param: A `tf.Tensor` containing a serialized graph proto. When + eager execution is enabled, this function will automatically + coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types. + step: The global step variable. This doesn't have useful semantics + for graph summaries, but is used anyway, due to the structure of + event log files. This defaults to the global step. + name: A name for the operation (optional). + + Returns: + The created `tf.Operation` or a `tf.no_op` if summary writing has + not been enabled for this context. + + Raises: + TypeError: If `param` isn't already a `tf.Tensor` in graph mode. + """ + if not context.executing_eagerly() and not isinstance( + param, tensor_lib.Tensor + ): + raise TypeError( + "graph() needs a argument `param` to be tf.Tensor " + "(e.g. tf.placeholder) in graph mode, but received " + f"param={param} of type {type(param).__name__}." 
+ ) + writer = _summary_state.writer + if writer is None: + return control_flow_ops.no_op() + with ops.device("cpu:0"): + if isinstance(param, (ops.Graph, graph_pb2.GraphDef)): + tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string) + else: + tensor = array_ops.identity(param) + return gen_summary_ops.write_graph_summary( + writer._resource, _choose_step(step), tensor, name=name) # pylint: disable=protected-access + + +@tf_export("summary.graph", v1=[]) +def graph(graph_data): + """Writes a TensorFlow graph summary. + + Write an instance of `tf.Graph` or `tf.compat.v1.GraphDef` as summary only + in an eager mode. Please prefer to use the trace APIs (`tf.summary.trace_on`, + `tf.summary.trace_off`, and `tf.summary.trace_export`) when using + `tf.function` which can automatically collect and record graphs from + executions. + + Usage Example: + ```py + writer = tf.summary.create_file_writer("/tmp/mylogs") + + @tf.function + def f(): + x = constant_op.constant(2) + y = constant_op.constant(3) + return x**y + + with writer.as_default(): + tf.summary.graph(f.get_concrete_function().graph) + + # Another example: in a very rare use case, when you are dealing with a TF v1 + # graph. + graph = tf.Graph() + with graph.as_default(): + c = tf.constant(30.0) + with writer.as_default(): + tf.summary.graph(graph) + ``` + + Args: + graph_data: The TensorFlow graph to write, as a `tf.Graph` or a + `tf.compat.v1.GraphDef`. + + Returns: + True on success, or False if no summary was written because no default + summary writer was available. + + Raises: + ValueError: `graph` summary API is invoked in a graph mode. 
+ """ + if not context.executing_eagerly(): + raise ValueError("graph() cannot be invoked inside a graph context.") + writer = _summary_state.writer + if writer is None: + return constant_op.constant(False) + with ops.device("cpu:0"): + if not should_record_summaries(): + return constant_op.constant(False) + + if isinstance(graph_data, (ops.Graph, graph_pb2.GraphDef)): + tensor = ops.convert_to_tensor( + _serialize_graph(graph_data), dtypes.string) + else: + raise ValueError("Argument 'graph_data' is not tf.Graph or " + "tf.compat.v1.GraphDef. Received graph_data=" + f"{graph_data} of type {type(graph_data).__name__}.") + + gen_summary_ops.write_graph_summary( + writer._resource, # pylint: disable=protected-access + # Graph does not have step. Set to 0. + 0, + tensor, + ) + return constant_op.constant(True) + + +def import_event(tensor, name=None): + """Writes a `tf.compat.v1.Event` binary proto. + + This can be used to import existing event logs into a new summary writer sink. + Please note that this is lower level than the other summary functions and + will ignore the `tf.summary.should_record_summaries` setting. + + Args: + tensor: A `tf.Tensor` of type `string` containing a serialized + `tf.compat.v1.Event` proto. + name: A name for the operation (optional). + + Returns: + The created `tf.Operation`. + """ + return gen_summary_ops.import_event( + _summary_state.writer._resource, tensor, name=name) # pylint: disable=protected-access + + +@tf_export("summary.flush", v1=[]) +def flush(writer=None, name=None): + """Forces summary writer to send any buffered data to storage. + + This operation blocks until that finishes. + + Args: + writer: The `tf.summary.SummaryWriter` to flush. If None, the current + default writer will be used instead; if there is no current writer, this + returns `tf.no_op`. + name: Ignored legacy argument for a name for the operation. + + Returns: + The created `tf.Operation`. 
+ """ + del name # unused + if writer is None: + writer = _summary_state.writer + if writer is None: + return control_flow_ops.no_op() + if isinstance(writer, SummaryWriter): + return writer.flush() + raise ValueError("Invalid argument to flush(): %r" % (writer,)) + + +def legacy_raw_flush(writer=None, name=None): + """Legacy version of flush() that accepts a raw resource tensor for `writer`. + + Do not use this function in any new code. Not supported and not part of the + public TF APIs. + + Args: + writer: The `tf.summary.SummaryWriter` to flush. If None, the current + default writer will be used instead; if there is no current writer, this + returns `tf.no_op`. For this legacy version only, also accepts a raw + resource tensor pointing to the underlying C++ writer resource. + name: Ignored legacy argument for a name for the operation. + + Returns: + The created `tf.Operation`. + """ + if writer is None or isinstance(writer, SummaryWriter): + # Forward to the TF2 implementation of flush() when possible. + return flush(writer, name) + else: + # Legacy fallback in case we were passed a raw resource tensor. 
+ with ops.device("cpu:0"): + return gen_summary_ops.flush_summary_writer(writer, name=name) + + +def eval_dir(model_dir, name=None): + """Construct a logdir for an eval summary writer.""" + return os.path.join(model_dir, "eval" if not name else "eval_" + name) + + +@deprecation.deprecated(date=None, + instructions="Renamed to create_file_writer().") +def create_summary_file_writer(*args, **kwargs): + """Please use `tf.contrib.summary.create_file_writer`.""" + logging.warning("Deprecation Warning: create_summary_file_writer was renamed " + "to create_file_writer") + return create_file_writer(*args, **kwargs) + + +def _serialize_graph(arbitrary_graph): + if isinstance(arbitrary_graph, ops.Graph): + return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString() + else: + return arbitrary_graph.SerializeToString() + + +def _choose_step(step): + if step is None: + return training_util.get_or_create_global_step() + if not isinstance(step, tensor_lib.Tensor): + return ops.convert_to_tensor(step, dtypes.int64) + return step + + +def _check_create_file_writer_args(inside_function, **kwargs): + """Helper to check the validity of arguments to a create_file_writer() call. + + Args: + inside_function: whether the create_file_writer() call is in a tf.function + **kwargs: the arguments to check, as kwargs to give them names. + + Raises: + ValueError: if the arguments are graph tensors. + """ + for arg_name, arg in kwargs.items(): + if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tf_type(arg): + if inside_function: + raise ValueError( + f"Invalid graph Tensor argument '{arg_name}={arg}' to " + "create_file_writer() inside an @tf.function. 
The create call will " + "be lifted into the outer eager execution context, so it cannot " + "consume graph tensors defined inside the function body.") + else: + raise ValueError( + f"Invalid graph Tensor argument '{arg_name}={arg}' to eagerly " + "executed create_file_writer().") + + +def run_metadata(name, data, step=None): + """Writes entire RunMetadata summary. + + A RunMetadata can contain DeviceStats, partition graphs, and function graphs. + Please refer to the proto for definition of each field. + + Args: + name: A name for this summary. The summary tag used for TensorBoard will be + this name prefixed by any active name scopes. + data: A RunMetadata proto to write. + step: Explicit `int64`-castable monotonic step value for this summary. If + omitted, this defaults to `tf.summary.experimental.get_step()`, which must + not be None. + + Returns: + True on success, or false if no summary was written because no default + summary writer was available. + + Raises: + ValueError: if a default writer exists, but no step was provided and + `tf.summary.experimental.get_step()` is None. + """ + summary_metadata = summary_pb2.SummaryMetadata() + # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for + # the rationale. + summary_metadata.plugin_data.plugin_name = "graph_run_metadata" + # version number = 1 + summary_metadata.plugin_data.content = b"1" + + with summary_scope(name, + "graph_run_metadata_summary", + [data, step]) as (tag, _): + with ops.device("cpu:0"): + tensor = constant_op.constant(data.SerializeToString(), + dtype=dtypes.string) + return write( + tag=tag, + tensor=tensor, + step=step, + metadata=summary_metadata) + + +def run_metadata_graphs(name, data, step=None): + """Writes graphs from a RunMetadata summary. + + Args: + name: A name for this summary. The summary tag used for TensorBoard will be + this name prefixed by any active name scopes. + data: A RunMetadata proto to write. 
+ step: Explicit `int64`-castable monotonic step value for this summary. If + omitted, this defaults to `tf.summary.experimental.get_step()`, which must + not be None. + + Returns: + True on success, or false if no summary was written because no default + summary writer was available. + + Raises: + ValueError: if a default writer exists, but no step was provided and + `tf.summary.experimental.get_step()` is None. + """ + summary_metadata = summary_pb2.SummaryMetadata() + # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for + # the rationale. + summary_metadata.plugin_data.plugin_name = "graph_run_metadata_graph" + # version number = 1 + summary_metadata.plugin_data.content = b"1" + + data = config_pb2.RunMetadata( + function_graphs=data.function_graphs, + partition_graphs=data.partition_graphs) + + with summary_scope(name, + "graph_run_metadata_graph_summary", + [data, step]) as (tag, _): + with ops.device("cpu:0"): + tensor = constant_op.constant(data.SerializeToString(), + dtype=dtypes.string) + return write( + tag=tag, + tensor=tensor, + step=step, + metadata=summary_metadata) + + +_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler")) +_current_trace_context_lock = threading.Lock() +_current_trace_context = None + + +@tf_export("summary.trace_on", v1=[]) +def trace_on(graph=True, profiler=False, profiler_outdir=None): # pylint: disable=redefined-outer-name + """Starts a trace to record computation graphs and profiling information. + + Must be invoked in eager mode. + + When enabled, TensorFlow runtime will collect information that can later be + exported and consumed by TensorBoard. The trace is activated across the entire + TensorFlow runtime and affects all threads of execution. + + To stop the trace and export the collected information, use + `tf.summary.trace_export`. To stop the trace without exporting, use + `tf.summary.trace_off`. + + Args: + graph: If True, enables collection of executed graphs. 
It includes ones from + tf.function invocation and ones from the legacy graph mode. The default is + True. + profiler: If True, enables the advanced profiler. Enabling profiler + implicitly enables the graph collection. The profiler may incur a high + memory overhead. The default is False. + profiler_outdir: Output directory for profiler. It is required when profiler + is enabled when trace was started. Otherwise, it is ignored. + """ + if ops.inside_function(): + logging.warn("Cannot enable trace inside a tf.function.") + return + if not context.executing_eagerly(): + logging.warn("Must enable trace in eager mode.") + return + + global _current_trace_context + with _current_trace_context_lock: + if _current_trace_context: + logging.warn("Trace already enabled") + return + + if graph and not profiler: + context.context().enable_graph_collection() + if profiler: + if profiler_outdir is None: + # TODO(b/149431324): Change this to throw a ValueError when Tensorflow + # major version advances. (current version is 2.15) + logging.warn( + "No `profiler_outdir` passed to trace_on(). Profiler won't be" + " enabled." + ) + else: + context.context().enable_run_metadata() + _profiler.start(profiler_outdir) + + _current_trace_context = _TraceContext(graph=graph, profiler=profiler) + + +# TODO(b/149431324): Delete `profiler_outdir` arg when Tensorflow major version +# advances. (current version is 2.15) +@tf_export("summary.trace_export", v1=[]) +def trace_export(name, step=None, profiler_outdir=None): + """Stops and exports the active trace as a Summary and/or profile file. + + Stops the trace and exports all metadata collected during the trace to the + default SummaryWriter, if one has been set. + + Args: + name: A name for the summary to be written. + step: Explicit `int64`-castable monotonic step value for this summary. If + omitted, this defaults to `tf.summary.experimental.get_step()`, which must + not be None. + profiler_outdir: This arg is a no-op. 
Please set this in trace_on(). + + Raises: + ValueError: if a default writer exists, but no step was provided and + `tf.summary.experimental.get_step()` is None. + """ + # TODO(stephanlee): See if we can remove profiler_outdir and infer it from + # the SummaryWriter's logdir. + global _current_trace_context + + if ops.inside_function(): + logging.warn("Cannot export trace inside a tf.function.") + return + if not context.executing_eagerly(): + logging.warn("Can only export trace while executing eagerly.") + return + + with _current_trace_context_lock: + if _current_trace_context is None: + raise ValueError("Must enable trace before export through " + "tf.summary.trace_on.") + graph, profiler = _current_trace_context # pylint: disable=redefined-outer-name + + run_meta = context.context().export_run_metadata() + + if graph and not profiler: + run_metadata_graphs(name, run_meta, step) + else: + run_metadata(name, run_meta, step) + + if profiler: + if profiler_outdir: + logging.warn( + "Ignoring `profiler_outdir` passed to trace_export(). Please pass it" + " to trace_on() instead." + ) + _profiler.stop() + + trace_off() + + +@tf_export("summary.trace_off", v1=[]) +def trace_off(): + """Stops the current trace and discards any collected information.""" + global _current_trace_context + with _current_trace_context_lock: + if _current_trace_context is None: + return # tracing already off + graph, profiler = _current_trace_context # pylint: disable=redefined-outer-name, unpacking-non-sequence + _current_trace_context = None + + if graph: + # Disabling run_metadata disables graph collection as well. 
+ context.context().disable_run_metadata() + + if profiler: + try: + _profiler.stop() + except Exception as e: # pylint: disable=broad-except + logging.warn("Error while stopping profiler: %s", e) + pass + + +def _maybe_convert_tensor_to_dtensor(writer, tensor): + if getattr(writer, "_mesh", None) is not None: + mesh = writer._mesh.host_mesh() # pylint: disable=protected-access + tensor = dtensor_api.copy_to_mesh( + tensor, layout_lib.Layout.replicated(mesh, rank=tensor.shape.rank) + ) + return tensor