diff --git a/llava_next/share/terminfo/a/aaa b/llava_next/share/terminfo/a/aaa new file mode 100644 index 0000000000000000000000000000000000000000..4057ed848a461b5b9233660999ec00878cae1887 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa differ diff --git a/llava_next/share/terminfo/a/aaa+rv b/llava_next/share/terminfo/a/aaa+rv new file mode 100644 index 0000000000000000000000000000000000000000..02cef13943d58eaed2857c0c846d68d3c67b490f Binary files /dev/null and b/llava_next/share/terminfo/a/aaa+rv differ diff --git a/llava_next/share/terminfo/a/aaa-18-rv b/llava_next/share/terminfo/a/aaa-18-rv new file mode 100644 index 0000000000000000000000000000000000000000..99fd2859d7593daa69ec8476b1e94ef3517d3ced Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-18-rv differ diff --git a/llava_next/share/terminfo/a/aaa-26 b/llava_next/share/terminfo/a/aaa-26 new file mode 100644 index 0000000000000000000000000000000000000000..d73e48a0b5c2b50014d430aaf16fafe153f20c93 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-26 differ diff --git a/llava_next/share/terminfo/a/aaa-30-ctxt b/llava_next/share/terminfo/a/aaa-30-ctxt new file mode 100644 index 0000000000000000000000000000000000000000..282c36211ff30eb04bd039b6aacdefbe3de7e749 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-30-ctxt differ diff --git a/llava_next/share/terminfo/a/aaa-40-rv b/llava_next/share/terminfo/a/aaa-40-rv new file mode 100644 index 0000000000000000000000000000000000000000..ee6046b72a920a60676b8d239bc34892106ba1d4 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-40-rv differ diff --git a/llava_next/share/terminfo/a/aaa-ctxt b/llava_next/share/terminfo/a/aaa-ctxt new file mode 100644 index 0000000000000000000000000000000000000000..282c36211ff30eb04bd039b6aacdefbe3de7e749 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-ctxt differ diff --git a/llava_next/share/terminfo/a/aaa-db b/llava_next/share/terminfo/a/aaa-db new file mode 100644 index 0000000000000000000000000000000000000000..524ad36e9e389e2bca2d979c3494d456ab07f2c7 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-db differ diff --git a/llava_next/share/terminfo/a/aaa-rv-ctxt b/llava_next/share/terminfo/a/aaa-rv-ctxt new file mode 100644 index 0000000000000000000000000000000000000000..fb8f018f418f862c0d8aa524e2806d7b08594339 Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-rv-ctxt differ diff --git a/llava_next/share/terminfo/a/abm80 b/llava_next/share/terminfo/a/abm80 new file mode 100644 index 0000000000000000000000000000000000000000..8cbee81177aafb14080a447921ad390f2a135e5c Binary files /dev/null and b/llava_next/share/terminfo/a/abm80 differ diff --git a/llava_next/share/terminfo/a/abm85e b/llava_next/share/terminfo/a/abm85e new file mode 100644 index 0000000000000000000000000000000000000000..5016ca6a5d18822640278f49346874cdbfaa6c71 Binary files /dev/null and b/llava_next/share/terminfo/a/abm85e differ diff --git a/llava_next/share/terminfo/a/abm85h b/llava_next/share/terminfo/a/abm85h new file mode 100644 index 0000000000000000000000000000000000000000..4f39fe891c6a92b5722206c95af9c1613040a18b Binary files /dev/null and b/llava_next/share/terminfo/a/abm85h differ diff --git a/llava_next/share/terminfo/a/adm12 b/llava_next/share/terminfo/a/adm12 new file mode 100644 index 0000000000000000000000000000000000000000..b0f05904614b2966ffba9b69dd10962e15ae4faa Binary files /dev/null and b/llava_next/share/terminfo/a/adm12 differ diff --git a/llava_next/share/terminfo/a/adm20 
b/llava_next/share/terminfo/a/adm20 new file mode 100644 index 0000000000000000000000000000000000000000..a6e7ba432dde5751a0175107041ffb2fd5472041 Binary files /dev/null and b/llava_next/share/terminfo/a/adm20 differ diff --git a/llava_next/share/terminfo/a/adm3 b/llava_next/share/terminfo/a/adm3 new file mode 100644 index 0000000000000000000000000000000000000000..428cd6e184de904c2ba76ffcc0796278f28a4e47 Binary files /dev/null and b/llava_next/share/terminfo/a/adm3 differ diff --git a/llava_next/share/terminfo/a/adm3a+ b/llava_next/share/terminfo/a/adm3a+ new file mode 100644 index 0000000000000000000000000000000000000000..4a74c19ba58c426a95b42539587dee7f211f0254 Binary files /dev/null and b/llava_next/share/terminfo/a/adm3a+ differ diff --git a/llava_next/share/terminfo/a/adm42 b/llava_next/share/terminfo/a/adm42 new file mode 100644 index 0000000000000000000000000000000000000000..95d9e9a9dd76619af17bc9067f1b2c1800bf7fa7 Binary files /dev/null and b/llava_next/share/terminfo/a/adm42 differ diff --git a/llava_next/share/terminfo/a/aixterm+sl b/llava_next/share/terminfo/a/aixterm+sl new file mode 100644 index 0000000000000000000000000000000000000000..276c9c369f23661e803fb93bdcf943a32bc2c4a0 Binary files /dev/null and b/llava_next/share/terminfo/a/aixterm+sl differ diff --git a/llava_next/share/terminfo/a/aj510 b/llava_next/share/terminfo/a/aj510 new file mode 100644 index 0000000000000000000000000000000000000000..7185b9243ee1985ff27c1121871a3b50333d1e07 Binary files /dev/null and b/llava_next/share/terminfo/a/aj510 differ diff --git a/llava_next/share/terminfo/a/altoheath b/llava_next/share/terminfo/a/altoheath new file mode 100644 index 0000000000000000000000000000000000000000..9dee8171e7d85285229356520f79d7fe6df1aa63 Binary files /dev/null and b/llava_next/share/terminfo/a/altoheath differ diff --git a/llava_next/share/terminfo/a/altos-3 b/llava_next/share/terminfo/a/altos-3 new file mode 100644 index 0000000000000000000000000000000000000000..1589774e513a871dcc45cdd2020b523a2f37a7c3 Binary files /dev/null and b/llava_next/share/terminfo/a/altos-3 differ diff --git a/llava_next/share/terminfo/a/altos7 b/llava_next/share/terminfo/a/altos7 new file mode 100644 index 0000000000000000000000000000000000000000..c6457f54b925ba4bef8006fe56c4d46ab3f85ff3 Binary files /dev/null and b/llava_next/share/terminfo/a/altos7 differ diff --git a/llava_next/share/terminfo/a/ambas b/llava_next/share/terminfo/a/ambas new file mode 100644 index 0000000000000000000000000000000000000000..4057ed848a461b5b9233660999ec00878cae1887 Binary files /dev/null and b/llava_next/share/terminfo/a/ambas differ diff --git a/llava_next/share/terminfo/a/amiga b/llava_next/share/terminfo/a/amiga new file mode 100644 index 0000000000000000000000000000000000000000..6233d1c811f3ae593fc779fa7a5c45effb585845 Binary files /dev/null and b/llava_next/share/terminfo/a/amiga differ diff --git a/llava_next/share/terminfo/a/amiga-vnc b/llava_next/share/terminfo/a/amiga-vnc new file mode 100644 index 0000000000000000000000000000000000000000..e1ebe0cdac524e868965b081019d1ae67f082b6b Binary files /dev/null and b/llava_next/share/terminfo/a/amiga-vnc differ diff --git a/llava_next/share/terminfo/a/amp219w b/llava_next/share/terminfo/a/amp219w new file mode 100644 index 0000000000000000000000000000000000000000..d45bd6492d098763bf9b0933fb324af697f4344f Binary files /dev/null and b/llava_next/share/terminfo/a/amp219w differ diff --git a/llava_next/share/terminfo/a/ampex-219w b/llava_next/share/terminfo/a/ampex-219w new file mode 100644 index 
0000000000000000000000000000000000000000..d45bd6492d098763bf9b0933fb324af697f4344f Binary files /dev/null and b/llava_next/share/terminfo/a/ampex-219w differ diff --git a/llava_next/share/terminfo/a/ampex-232 b/llava_next/share/terminfo/a/ampex-232 new file mode 100644 index 0000000000000000000000000000000000000000..698c33d51674d2d50a8e815ffa36b08ece95646a Binary files /dev/null and b/llava_next/share/terminfo/a/ampex-232 differ diff --git a/llava_next/share/terminfo/a/ampex219w b/llava_next/share/terminfo/a/ampex219w new file mode 100644 index 0000000000000000000000000000000000000000..d45bd6492d098763bf9b0933fb324af697f4344f Binary files /dev/null and b/llava_next/share/terminfo/a/ampex219w differ diff --git a/llava_next/share/terminfo/a/ansi+cup b/llava_next/share/terminfo/a/ansi+cup new file mode 100644 index 0000000000000000000000000000000000000000..d1b653d113820c21832154ecabe96bf28b67eca4 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+cup differ diff --git a/llava_next/share/terminfo/a/ansi+erase b/llava_next/share/terminfo/a/ansi+erase new file mode 100644 index 0000000000000000000000000000000000000000..39c766a551e565ea94210ed616be52ad66dd8f1d Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+erase differ diff --git a/llava_next/share/terminfo/a/ansi+idc b/llava_next/share/terminfo/a/ansi+idc new file mode 100644 index 0000000000000000000000000000000000000000..90fc17df010c64bc4650053d2820b6aaf525be1e Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+idc differ diff --git a/llava_next/share/terminfo/a/ansi+idl1 b/llava_next/share/terminfo/a/ansi+idl1 new file mode 100644 index 0000000000000000000000000000000000000000..f743208cb7cffc3b9dc0ed37cb914b3b020b451e Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+idl1 differ diff --git a/llava_next/share/terminfo/a/ansi+rca2 b/llava_next/share/terminfo/a/ansi+rca2 new file mode 100644 index 0000000000000000000000000000000000000000..2c1175943106ff4d0471189ff55b926c534bb8e0 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+rca2 differ diff --git a/llava_next/share/terminfo/a/ansi+sgrso b/llava_next/share/terminfo/a/ansi+sgrso new file mode 100644 index 0000000000000000000000000000000000000000..a6bc57f275fe83d9ad05ab73e0977c96fd084d96 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+sgrso differ diff --git a/llava_next/share/terminfo/a/ansi-m b/llava_next/share/terminfo/a/ansi-m new file mode 100644 index 0000000000000000000000000000000000000000..df6fd79849a834696a53669e1eaa5393185371c3 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi-m differ diff --git a/llava_next/share/terminfo/a/ansi80x25-raw b/llava_next/share/terminfo/a/ansi80x25-raw new file mode 100644 index 0000000000000000000000000000000000000000..e3621f7c22fb8c7e28d708217db550269bb9d068 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi80x25-raw differ diff --git a/llava_next/share/terminfo/a/ansis-mono b/llava_next/share/terminfo/a/ansis-mono new file mode 100644 index 0000000000000000000000000000000000000000..4baf2844bbcc390b10590d0a73c56eff73ea8271 Binary files /dev/null and b/llava_next/share/terminfo/a/ansis-mono differ diff --git a/llava_next/share/terminfo/a/ansisysk b/llava_next/share/terminfo/a/ansisysk new file mode 100644 index 0000000000000000000000000000000000000000..55c1aa7b9d3e1f65c644bbaf4a99f765fc991456 Binary files /dev/null and b/llava_next/share/terminfo/a/ansisysk differ diff --git a/llava_next/share/terminfo/a/apollo+vt132 
b/llava_next/share/terminfo/a/apollo+vt132 new file mode 100644 index 0000000000000000000000000000000000000000..d75f5f62bcf2a97b118b9cd5218233d112a9f6b3 Binary files /dev/null and b/llava_next/share/terminfo/a/apollo+vt132 differ diff --git a/llava_next/share/terminfo/a/apollo_color b/llava_next/share/terminfo/a/apollo_color new file mode 100644 index 0000000000000000000000000000000000000000..18eb8170937b07b36bcd68dc7915d12e08849fcc Binary files /dev/null and b/llava_next/share/terminfo/a/apollo_color differ diff --git a/llava_next/share/terminfo/a/apple-uterm-vb b/llava_next/share/terminfo/a/apple-uterm-vb new file mode 100644 index 0000000000000000000000000000000000000000..047855e8a4d277a69afa83e3bda5ce3c1d45edb1 Binary files /dev/null and b/llava_next/share/terminfo/a/apple-uterm-vb differ diff --git a/llava_next/share/terminfo/a/appleIIe b/llava_next/share/terminfo/a/appleIIe new file mode 100644 index 0000000000000000000000000000000000000000..1311b90c397a4ccf9d4c22a318cb6fa8b45c16c9 Binary files /dev/null and b/llava_next/share/terminfo/a/appleIIe differ diff --git a/llava_next/share/terminfo/a/appleIIgs b/llava_next/share/terminfo/a/appleIIgs new file mode 100644 index 0000000000000000000000000000000000000000..1311b90c397a4ccf9d4c22a318cb6fa8b45c16c9 Binary files /dev/null and b/llava_next/share/terminfo/a/appleIIgs differ diff --git a/llava_next/share/terminfo/a/arm100-am b/llava_next/share/terminfo/a/arm100-am new file mode 100644 index 0000000000000000000000000000000000000000..2729a3db57f9b4dbc71e18331c5af2275b02f846 Binary files /dev/null and b/llava_next/share/terminfo/a/arm100-am differ diff --git a/llava_next/share/terminfo/a/arm100-wam b/llava_next/share/terminfo/a/arm100-wam new file mode 100644 index 0000000000000000000000000000000000000000..c19035a96991238587eff331e87361f759f17447 Binary files /dev/null and b/llava_next/share/terminfo/a/arm100-wam differ diff --git a/llava_next/share/terminfo/a/atarist-m b/llava_next/share/terminfo/a/atarist-m new file mode 100644 index 0000000000000000000000000000000000000000..b48ef93d33406f0934cecb007347e51cb49172bc Binary files /dev/null and b/llava_next/share/terminfo/a/atarist-m differ diff --git a/llava_next/share/terminfo/a/att2300 b/llava_next/share/terminfo/a/att2300 new file mode 100644 index 0000000000000000000000000000000000000000..cb9e0683df7dc3d437f1f2b23a87831b098f41ad Binary files /dev/null and b/llava_next/share/terminfo/a/att2300 differ diff --git a/llava_next/share/terminfo/a/att4410 b/llava_next/share/terminfo/a/att4410 new file mode 100644 index 0000000000000000000000000000000000000000..41072d6638c22ee6d9ecf136ac75c49919eaead2 Binary files /dev/null and b/llava_next/share/terminfo/a/att4410 differ diff --git a/llava_next/share/terminfo/a/att4415 b/llava_next/share/terminfo/a/att4415 new file mode 100644 index 0000000000000000000000000000000000000000..c1ff8e5aa105f1c8aab1b4a3faf0ada6fc321883 Binary files /dev/null and b/llava_next/share/terminfo/a/att4415 differ diff --git a/llava_next/share/terminfo/a/att4415-nl b/llava_next/share/terminfo/a/att4415-nl new file mode 100644 index 0000000000000000000000000000000000000000..d2713dab0c39706fe4d74cc25f7988b72c299f5f Binary files /dev/null and b/llava_next/share/terminfo/a/att4415-nl differ diff --git a/llava_next/share/terminfo/a/att4424m b/llava_next/share/terminfo/a/att4424m new file mode 100644 index 0000000000000000000000000000000000000000..88e67696961507af49e9cdf9a882da543777b4c8 Binary files /dev/null and b/llava_next/share/terminfo/a/att4424m differ diff --git 
a/llava_next/share/terminfo/a/att4425-nl b/llava_next/share/terminfo/a/att4425-nl new file mode 100644 index 0000000000000000000000000000000000000000..86c8672065bb9f7fcc5cd537481f26f20d4e7a1a Binary files /dev/null and b/llava_next/share/terminfo/a/att4425-nl differ diff --git a/llava_next/share/terminfo/a/att4425-w b/llava_next/share/terminfo/a/att4425-w new file mode 100644 index 0000000000000000000000000000000000000000..e9ffd490042e48c5b7836f97d083ae854799ff7e Binary files /dev/null and b/llava_next/share/terminfo/a/att4425-w differ diff --git a/llava_next/share/terminfo/a/att5620 b/llava_next/share/terminfo/a/att5620 new file mode 100644 index 0000000000000000000000000000000000000000..1bb40715fdaeac6142fcc6281d95d8ec60469312 Binary files /dev/null and b/llava_next/share/terminfo/a/att5620 differ diff --git a/llava_next/share/terminfo/a/att5620-24 b/llava_next/share/terminfo/a/att5620-24 new file mode 100644 index 0000000000000000000000000000000000000000..14c34312049c55a037f6dd6e65d83c3c5a6cda48 Binary files /dev/null and b/llava_next/share/terminfo/a/att5620-24 differ diff --git a/llava_next/share/terminfo/a/att610-w b/llava_next/share/terminfo/a/att610-w new file mode 100644 index 0000000000000000000000000000000000000000..c81aaf9978330ecd6a55b045318bd23867b587e5 Binary files /dev/null and b/llava_next/share/terminfo/a/att610-w differ diff --git a/llava_next/share/terminfo/a/att615-w b/llava_next/share/terminfo/a/att615-w new file mode 100644 index 0000000000000000000000000000000000000000..3c811b47fde47513646fa4a35dc240c710734936 Binary files /dev/null and b/llava_next/share/terminfo/a/att615-w differ diff --git a/llava_next/share/terminfo/a/att620-103k b/llava_next/share/terminfo/a/att620-103k new file mode 100644 index 0000000000000000000000000000000000000000..ff3179212f7d22fb9e5cb80b64ab75dd1986a189 Binary files /dev/null and b/llava_next/share/terminfo/a/att620-103k differ diff --git a/llava_next/share/terminfo/a/att730 b/llava_next/share/terminfo/a/att730 new file mode 100644 index 0000000000000000000000000000000000000000..6bc6b74ba1bf64fc809dde49fbdb2e3e7f5c6746 Binary files /dev/null and b/llava_next/share/terminfo/a/att730 differ diff --git a/llava_next/share/terminfo/a/att730-41 b/llava_next/share/terminfo/a/att730-41 new file mode 100644 index 0000000000000000000000000000000000000000..085e285669cbaf42ba0e8d939f8649162e6c1d8f Binary files /dev/null and b/llava_next/share/terminfo/a/att730-41 differ diff --git a/llava_next/share/terminfo/a/att730r b/llava_next/share/terminfo/a/att730r new file mode 100644 index 0000000000000000000000000000000000000000..12751e4437395a60ef6b8c186e30b89dc8d9846e Binary files /dev/null and b/llava_next/share/terminfo/a/att730r differ diff --git a/llava_next/share/terminfo/a/avatar1 b/llava_next/share/terminfo/a/avatar1 new file mode 100644 index 0000000000000000000000000000000000000000..15b4ae0edb0951c27572fb3644beec9b429a1d35 Binary files /dev/null and b/llava_next/share/terminfo/a/avatar1 differ diff --git a/llava_next/share/terminfo/a/avt+s b/llava_next/share/terminfo/a/avt+s new file mode 100644 index 0000000000000000000000000000000000000000..0d338a8ba17e05b42da58102013836c95e1ad60a Binary files /dev/null and b/llava_next/share/terminfo/a/avt+s differ diff --git a/llava_next/share/terminfo/a/avt-s b/llava_next/share/terminfo/a/avt-s new file mode 100644 index 0000000000000000000000000000000000000000..d38aa508e932054b06a0142e0562c8f38ccc2a0d Binary files /dev/null and b/llava_next/share/terminfo/a/avt-s differ diff --git 
a/llava_next/share/terminfo/a/avt-w-rv-ns b/llava_next/share/terminfo/a/avt-w-rv-ns new file mode 100644 index 0000000000000000000000000000000000000000..1c369ccab5eedef29fd9ccfbb08954cc68eb2b5e Binary files /dev/null and b/llava_next/share/terminfo/a/avt-w-rv-ns differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..88721691ca02d33c89009d5560f2f0aed5d4ee74 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_math_ops.py @@ -0,0 +1,13689 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_Abs_T = TypeVar("TV_Abs_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +def _abs(x: Annotated[Any, TV_Abs_T], name=None) -> Annotated[Any, TV_Abs_T]: + r"""Computes the absolute value of a tensor. + + Given a tensor `x`, this operation returns a tensor containing the absolute + value of each element in `x`. For example, if x is an input element and y is + an output element, this operation computes \\(y = |x|\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Abs", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _abs_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Abs", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Abs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Abs = tf_export("raw_ops.Abs")(_ops.to_raw_op(_abs)) + + +def _abs_eager_fallback(x: Annotated[Any, TV_Abs_T], name, ctx) -> Annotated[Any, TV_Abs_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Abs", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Abs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_AccumulateNV2_T = TypeVar("TV_AccumulateNV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def accumulate_nv2(inputs: Annotated[List[Any], TV_AccumulateNV2_T], shape, name=None) -> Annotated[Any, TV_AccumulateNV2_T]: + r"""Returns the element-wise sum of a list of tensors. + + `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + wait for all of its inputs to be ready before beginning to sum. This can + save memory if inputs are ready at different times, since minimum temporary + storage is proportional to the output size rather than the inputs size. + + Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + + Returns a `Tensor` of same shape and type as the elements of `inputs`. + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + A list of `Tensor` objects, each with same shape and type. + shape: A `tf.TensorShape` or list of `ints`. + Shape of elements of `inputs`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AccumulateNV2", name, inputs, "shape", shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return accumulate_nv2_eager_fallback( + inputs, shape=shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'accumulate_nv2' Op, not %r." 
% inputs) + _attr_N = len(inputs) + shape = _execute.make_shape(shape, "shape") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AccumulateNV2", inputs=inputs, shape=shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), + "shape", _op.get_attr("shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AccumulateNV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AccumulateNV2 = tf_export("raw_ops.AccumulateNV2")(_ops.to_raw_op(accumulate_nv2)) + + +def accumulate_nv2_eager_fallback(inputs: Annotated[List[Any], TV_AccumulateNV2_T], shape, name, ctx) -> Annotated[Any, TV_AccumulateNV2_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'accumulate_nv2' Op, not %r." % inputs) + _attr_N = len(inputs) + shape = _execute.make_shape(shape, "shape") + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "T", _attr_T, "shape", shape) + _result = _execute.execute(b"AccumulateNV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AccumulateNV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Acos_T = TypeVar("TV_Acos_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def acos(x: Annotated[Any, TV_Acos_T], name=None) -> Annotated[Any, TV_Acos_T]: + r"""Computes acos of x element-wise. + + + Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + + Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Acos", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return acos_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Acos", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Acos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Acos = tf_export("raw_ops.Acos")(_ops.to_raw_op(acos)) + + +def acos_eager_fallback(x: Annotated[Any, TV_Acos_T], name, ctx) -> Annotated[Any, TV_Acos_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Acos", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Acos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Acosh_T = TypeVar("TV_Acosh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.acosh', 'acosh') +def acosh(x: Annotated[Any, TV_Acosh_T], name=None) -> Annotated[Any, TV_Acosh_T]: + r"""Computes inverse hyperbolic cosine of x element-wise. + + Given an input tensor, the function computes inverse hyperbolic cosine of every element. + Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. + + ```python + x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Acosh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_acosh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return acosh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + acosh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_acosh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Acosh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + acosh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Acosh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Acosh = tf_export("raw_ops.Acosh")(_ops.to_raw_op(acosh)) +_dispatcher_for_acosh = acosh._tf_type_based_dispatcher.Dispatch + + +def acosh_eager_fallback(x: Annotated[Any, TV_Acosh_T], name, ctx) -> Annotated[Any, TV_Acosh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Acosh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Acosh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Add_T = TypeVar("TV_Add_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.String, _atypes.UInt8) + +def add(x: Annotated[Any, TV_Add_T], y: Annotated[Any, TV_Add_T], name=None) -> Annotated[Any, TV_Add_T]: + r"""Returns x + y element-wise. + + *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor. + + Both input and output have a range `(-inf, inf)`. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `string`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Add", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return add_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Add", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Add", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Add = tf_export("raw_ops.Add")(_ops.to_raw_op(add)) + + +def add_eager_fallback(x: Annotated[Any, TV_Add_T], y: Annotated[Any, TV_Add_T], name, ctx) -> Annotated[Any, TV_Add_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, _dtypes.string, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Add", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_AddN_T = TypeVar("TV_AddN_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def add_n(inputs: Annotated[List[Any], TV_AddN_T], name=None) -> Annotated[Any, TV_AddN_T]: + r"""Add all input tensors element wise. + + Inputs must be of same size and shape. + + ```python + x = [9, 7, 10] + tf.math.add_n(x) ==> 26 + ``` + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AddN", name, inputs) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return add_n_eager_fallback( + inputs, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'add_n' Op, not %r." % inputs) + _attr_N = len(inputs) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AddN", inputs=inputs, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AddN", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AddN = tf_export("raw_ops.AddN")(_ops.to_raw_op(add_n)) + + +def add_n_eager_fallback(inputs: Annotated[List[Any], TV_AddN_T], name, ctx) -> Annotated[Any, TV_AddN_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'add_n' Op, not %r." 
% inputs) + _attr_N = len(inputs) + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.variant, ]) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "T", _attr_T) + _result = _execute.execute(b"AddN", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AddN", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_AddV2_T = TypeVar("TV_AddV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def add_v2(x: Annotated[Any, TV_AddV2_T], y: Annotated[Any, TV_AddV2_T], name=None) -> Annotated[Any, TV_AddV2_T]: + r"""Returns x + y element-wise. + + *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AddV2", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return add_v2_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AddV2", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AddV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AddV2 = tf_export("raw_ops.AddV2")(_ops.to_raw_op(add_v2)) + + +def add_v2_eager_fallback(x: Annotated[Any, TV_AddV2_T], y: Annotated[Any, TV_AddV2_T], name, ctx) -> Annotated[Any, TV_AddV2_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"AddV2", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AddV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_All_Tidx = TypeVar("TV_All_Tidx", _atypes.Int32, _atypes.Int64) + +def _all(input: Annotated[Any, _atypes.Bool], axis: Annotated[Any, TV_All_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Computes the "logical and" of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor` of type `bool`. The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "All", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _all_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "All", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", + _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "All", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +All = tf_export("raw_ops.All")(_ops.to_raw_op(_all)) + + +def _all_eager_fallback(input: Annotated[Any, _atypes.Bool], axis: Annotated[Any, TV_All_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, _atypes.Bool]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + input = _ops.convert_to_tensor(input, _dtypes.bool) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx) + _result = _execute.execute(b"All", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "All", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Angle_T = TypeVar("TV_Angle_T", _atypes.Complex128, _atypes.Complex64) +TV_Angle_Tout = TypeVar("TV_Angle_Tout", _atypes.Float32, _atypes.Float64) + +def angle(input: Annotated[Any, TV_Angle_T], Tout:TV_Angle_Tout=_dtypes.float32, name=None) -> Annotated[Any, TV_Angle_Tout]: + r"""Returns the argument of a complex number. + + Given a tensor `input` of complex numbers, this operation returns a tensor of + type `float` that is the argument of each element in `input`. All elements in + `input` must be complex numbers of the form \\(a + bj\\), where *a* + is the real part and *b* is the imaginary part. + + The argument returned by this operation is of the form \\(atan2(b, a)\\). + + For example: + + ``` + # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + tf.math.angle(input) ==> [2.0132, 1.056] + ``` + + @compatibility(numpy) + Equivalent to np.angle. + @end_compatibility + + Args: + input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. + Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Angle", name, input, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return angle_eager_fallback( + input, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Angle", input=input, Tout=Tout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tout", + _op._get_attr_type("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Angle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Angle = tf_export("raw_ops.Angle")(_ops.to_raw_op(angle)) + + +def angle_eager_fallback(input: Annotated[Any, TV_Angle_T], Tout: TV_Angle_Tout, name, ctx) -> Annotated[Any, TV_Angle_Tout]: + if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.complex64, _dtypes.complex128, ], _dtypes.complex64) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "Tout", Tout) + _result = _execute.execute(b"Angle", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Angle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Any_Tidx = TypeVar("TV_Any_Tidx", _atypes.Int32, _atypes.Int64) + +def _any(input: Annotated[Any, _atypes.Bool], axis: Annotated[Any, TV_Any_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Computes the "logical or" of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor` of type `bool`. The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Any", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _any_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Any", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", + _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Any", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Any = tf_export("raw_ops.Any")(_ops.to_raw_op(_any)) + + +def _any_eager_fallback(input: Annotated[Any, _atypes.Bool], axis: Annotated[Any, TV_Any_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, _atypes.Bool]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + input = _ops.convert_to_tensor(input, _dtypes.bool) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Any", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Any", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ApproximateEqual_T = TypeVar("TV_ApproximateEqual_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def approximate_equal(x: Annotated[Any, TV_ApproximateEqual_T], y: Annotated[Any, TV_ApproximateEqual_T], tolerance:float=1e-05, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of abs(x-y) < tolerance element-wise. + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + tolerance: An optional `float`. Defaults to `1e-05`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ApproximateEqual", name, x, y, "tolerance", tolerance) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return approximate_equal_eager_fallback( + x, y, tolerance=tolerance, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if tolerance is None: + tolerance = 1e-05 + tolerance = _execute.make_float(tolerance, "tolerance") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ApproximateEqual", x=x, y=y, tolerance=tolerance, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "tolerance", + _op.get_attr("tolerance")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ApproximateEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ApproximateEqual = tf_export("raw_ops.ApproximateEqual")(_ops.to_raw_op(approximate_equal)) + + +def approximate_equal_eager_fallback(x: Annotated[Any, TV_ApproximateEqual_T], y: Annotated[Any, TV_ApproximateEqual_T], tolerance: float, name, ctx) -> Annotated[Any, _atypes.Bool]: + if tolerance is None: + tolerance = 1e-05 + tolerance = _execute.make_float(tolerance, "tolerance") + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T, "tolerance", tolerance) + _result = _execute.execute(b"ApproximateEqual", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ApproximateEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ArgMax_T = TypeVar("TV_ArgMax_T", _atypes.BFloat16, _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ArgMax_Tidx = TypeVar("TV_ArgMax_Tidx", _atypes.Int16, _atypes.Int32, _atypes.Int64) +TV_ArgMax_output_type = TypeVar("TV_ArgMax_output_type", _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.UInt16) + +def arg_max(input: Annotated[Any, TV_ArgMax_T], dimension: Annotated[Any, TV_ArgMax_Tidx], output_type:TV_ArgMax_output_type=_dtypes.int64, name=None) -> Annotated[Any, TV_ArgMax_output_type]: + r"""Returns the index with the largest value across dimensions of a tensor. + + Note that in case of ties the identity of the return value is not guaranteed. + + Usage: + ```python + import tensorflow as tf + a = [1, 10, 26.9, 2.8, 166.32, 62.3] + b = tf.math.argmax(input = a) + c = tf.keras.backend.eval(b) + # c = 4 + # here a[4] = 166.32 which is the largest element of a across axis 0 + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`, `bool`. + dimension: A `Tensor`. Must be one of the following types: `int16`, `int32`, `int64`. + int16, int32 or int64, must be in the range `[-rank(input), rank(input))`. + Describes which dimension of the input Tensor to reduce across. For vectors, + use dimension = 0. + output_type: An optional `tf.DType` from: `tf.int16, tf.uint16, tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `output_type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ArgMax", name, input, dimension, "output_type", output_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return arg_max_eager_fallback( + input, dimension, output_type=output_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if output_type is None: + output_type = _dtypes.int64 + output_type = _execute.make_type(output_type, "output_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ArgMax", input=input, dimension=dimension, output_type=output_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "output_type", + _op._get_attr_type("output_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ArgMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ArgMax = tf_export("raw_ops.ArgMax")(_ops.to_raw_op(arg_max)) + + +def arg_max_eager_fallback(input: Annotated[Any, TV_ArgMax_T], dimension: Annotated[Any, TV_ArgMax_Tidx], output_type: TV_ArgMax_output_type, name, ctx) -> Annotated[Any, TV_ArgMax_output_type]: + if output_type is None: + output_type = _dtypes.int64 + output_type = _execute.make_type(output_type, "output_type") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, _dtypes.bool, ]) + _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], ctx, [_dtypes.int16, _dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, dimension] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type) + _result = _execute.execute(b"ArgMax", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ArgMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ArgMin_T = TypeVar("TV_ArgMin_T", _atypes.BFloat16, _atypes.Bool, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_ArgMin_Tidx = TypeVar("TV_ArgMin_Tidx", _atypes.Int32, _atypes.Int64) +TV_ArgMin_output_type = TypeVar("TV_ArgMin_output_type", _atypes.Int32, _atypes.Int64) + +def arg_min(input: Annotated[Any, TV_ArgMin_T], dimension: Annotated[Any, TV_ArgMin_Tidx], output_type:TV_ArgMin_output_type=_dtypes.int64, name=None) -> Annotated[Any, TV_ArgMin_output_type]: + r"""Returns the index with the smallest value across dimensions of a tensor. + + Note that in case of ties the identity of the return value is not guaranteed. + + Usage: + ```python + import tensorflow as tf + a = [1, 10, 26.9, 2.8, 166.32, 62.3] + b = tf.math.argmin(input = a) + c = tf.keras.backend.eval(b) + # c = 0 + # here a[0] = 1 which is the smallest element of a across axis 0 + ``` + + Args: + input: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`, `bool`. + dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`. + int32 or int64, must be in the range `[-rank(input), rank(input))`. + Describes which dimension of the input Tensor to reduce across. For vectors, + use dimension = 0. + output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `output_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ArgMin", name, input, dimension, "output_type", output_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return arg_min_eager_fallback( + input, dimension, output_type=output_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if output_type is None: + output_type = _dtypes.int64 + output_type = _execute.make_type(output_type, "output_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ArgMin", input=input, dimension=dimension, output_type=output_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "output_type", + _op._get_attr_type("output_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ArgMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ArgMin = tf_export("raw_ops.ArgMin")(_ops.to_raw_op(arg_min)) + + +def arg_min_eager_fallback(input: Annotated[Any, TV_ArgMin_T], dimension: Annotated[Any, TV_ArgMin_Tidx], output_type: TV_ArgMin_output_type, name, ctx) -> Annotated[Any, TV_ArgMin_output_type]: + if output_type is None: + output_type = _dtypes.int64 + output_type = _execute.make_type(output_type, "output_type") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, _dtypes.bool, ]) + _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, dimension] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type) + _result = _execute.execute(b"ArgMin", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ArgMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Asin_T = TypeVar("TV_Asin_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.asin', 'asin') +def asin(x: Annotated[Any, TV_Asin_T], name=None) -> Annotated[Any, TV_Asin_T]: + r"""Computes the trignometric inverse sine of x element-wise. 
+ + The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. + + **Note**: The output of `tf.math.asin` will lie within the invertible range + of sine, i.e [-pi/2, pi/2]. + + For example: + + ```python + # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + x = tf.constant([1.047, 0.785]) + y = tf.math.sin(x) # [0.8659266, 0.7068252] + + tf.math.asin(y) # [1.047, 0.785] = x + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Asin", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_asin( + (x, name,), None) + if _result is not NotImplemented: + return _result + return asin_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + asin, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_asin( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Asin", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + asin, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Asin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Asin = tf_export("raw_ops.Asin")(_ops.to_raw_op(asin)) +_dispatcher_for_asin = asin._tf_type_based_dispatcher.Dispatch + + +def asin_eager_fallback(x: Annotated[Any, TV_Asin_T], name, ctx) -> Annotated[Any, TV_Asin_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Asin", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Asin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Asinh_T = TypeVar("TV_Asinh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.asinh', 'asinh') +def asinh(x: Annotated[Any, TV_Asinh_T], name=None) -> Annotated[Any, TV_Asinh_T]: + r"""Computes inverse hyperbolic sine of x element-wise. + + Given an input tensor, this function computes inverse hyperbolic sine + for every element in the tensor. Both input and output has a range of + `[-inf, inf]`. 
+ + ```python + x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Asinh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_asinh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return asinh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + asinh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_asinh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Asinh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + asinh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Asinh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Asinh = tf_export("raw_ops.Asinh")(_ops.to_raw_op(asinh)) +_dispatcher_for_asinh = asinh._tf_type_based_dispatcher.Dispatch + + +def asinh_eager_fallback(x: Annotated[Any, TV_Asinh_T], name, ctx) -> Annotated[Any, TV_Asinh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Asinh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Asinh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Atan_T = TypeVar("TV_Atan_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.atan', 'atan') +def atan(x: Annotated[Any, TV_Atan_T], name=None) -> Annotated[Any, TV_Atan_T]: + r"""Computes the trignometric inverse tangent of x element-wise. + + The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that + if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. + + **Note**: The output of `tf.math.atan` will lie within the invertible range + of tan, i.e (-pi/2, pi/2). + + For example: + + ```python + # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + x = tf.constant([1.047, 0.785]) + y = tf.math.tan(x) # [1.731261, 0.99920404] + + tf.math.atan(y) # [1.047, 0.785] = x + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Atan", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_atan( + (x, name,), None) + if _result is not NotImplemented: + return _result + return atan_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_atan( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Atan", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Atan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Atan = tf_export("raw_ops.Atan")(_ops.to_raw_op(atan)) +_dispatcher_for_atan = atan._tf_type_based_dispatcher.Dispatch + + +def atan_eager_fallback(x: Annotated[Any, TV_Atan_T], name, ctx) -> Annotated[Any, TV_Atan_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Atan", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Atan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Atan2_T = TypeVar("TV_Atan2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.atan2', 'atan2') +def atan2(y: Annotated[Any, TV_Atan2_T], x: Annotated[Any, TV_Atan2_T], name=None) -> Annotated[Any, TV_Atan2_T]: + r"""Computes arctangent of `y/x` element-wise, respecting signs of the arguments. + + This is the angle \\( \theta \in [-\pi, \pi] \\) such that + \\[ x = r \cos(\theta) \\] + and + \\[ y = r \sin(\theta) \\] + where \\(r = \sqrt{x^2 + y^2} \\). + + For example: + + >>> x = [1., 1.] + >>> y = [1., -1.] + >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy()) + [ 45. -45.] + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + x: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. 
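+
+  For example, the sign handling is what distinguishes this op from a plain
+  `atan(y/x)` (an illustrative sketch; printed values are approximate):
+
+  ```python
+  x = tf.constant([-1.0, 1.0])
+  y = tf.constant([1.0, -1.0])
+  tf.math.atan2(y, x)  # ~[ 2.356, -0.785], i.e. 3*pi/4 and -pi/4
+  tf.math.atan(y / x)  # ~[-0.785, -0.785]; the quadrant of the first point is lost
+  ```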
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Atan2", name, y, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_atan2( + (y, x, name,), None) + if _result is not NotImplemented: + return _result + return atan2_eager_fallback( + y, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atan2, (), dict(y=y, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_atan2( + (y, x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Atan2", y=y, x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atan2, (), dict(y=y, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Atan2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Atan2 = tf_export("raw_ops.Atan2")(_ops.to_raw_op(atan2)) +_dispatcher_for_atan2 = atan2._tf_type_based_dispatcher.Dispatch + + +def atan2_eager_fallback(y: Annotated[Any, TV_Atan2_T], x: Annotated[Any, TV_Atan2_T], name, ctx) -> Annotated[Any, TV_Atan2_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (y, x) = _inputs_T + _inputs_flat = [y, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Atan2", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Atan2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Atanh_T = TypeVar("TV_Atanh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.atanh', 'atanh') +def atanh(x: Annotated[Any, TV_Atanh_T], name=None) -> Annotated[Any, TV_Atanh_T]: + r"""Computes inverse hyperbolic tangent of x element-wise. + + Given an input tensor, this function computes inverse hyperbolic tangent + for every element in the tensor. Input range is `[-1,1]` and output range is + `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + input is `1`, output will be `inf`. Values outside the range will have + `nan` as output. + + ```python + x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
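+
+  Since this op is the inverse of `tf.math.tanh` on `(-1, 1)`, round-tripping a
+  value inside that interval recovers it (illustrative sketch; values approximate):
+
+  ```python
+  x = tf.constant([0.5, -0.25])
+  tf.math.atanh(tf.math.tanh(x))  # ~[0.5, -0.25]
+  ```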
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Atanh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_atanh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return atanh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atanh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_atanh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Atanh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + atanh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Atanh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Atanh = tf_export("raw_ops.Atanh")(_ops.to_raw_op(atanh)) +_dispatcher_for_atanh = atanh._tf_type_based_dispatcher.Dispatch + + +def atanh_eager_fallback(x: Annotated[Any, TV_Atanh_T], name, ctx) -> Annotated[Any, TV_Atanh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Atanh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Atanh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BatchMatMul_T = TypeVar("TV_BatchMatMul_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def batch_mat_mul(x: Annotated[Any, TV_BatchMatMul_T], y: Annotated[Any, TV_BatchMatMul_T], adj_x:bool=False, adj_y:bool=False, grad_x:bool=False, grad_y:bool=False, name=None) -> Annotated[Any, TV_BatchMatMul_T]: + r"""Multiplies slices of two tensors in batches. + + Multiplies all slices of `Tensor` `x` and `y` (each slice can be + viewed as an element of a batch), and arranges the individual results + in a single output tensor of the same batch size. Each of the + individual slices can optionally be adjointed (to adjoint a matrix + means to transpose and conjugate it) before multiplication by setting + the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + + The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + and `[..., r_y, c_y]`. + + The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + + r_o = c_x if adj_x else r_x + c_o = r_y if adj_y else c_y + + It is computed as: + + output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. + 2-D or higher with shape `[..., r_x, c_x]`. + y: A `Tensor`. 
Must have the same type as `x`. + 2-D or higher with shape `[..., r_y, c_y]`. + adj_x: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `x`. Defaults to `False`. + adj_y: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `y`. Defaults to `False`. + grad_x: An optional `bool`. Defaults to `False`. + grad_y: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchMatMul", name, x, y, "adj_x", adj_x, "adj_y", adj_y, + "grad_x", grad_x, "grad_y", grad_y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_mat_mul_eager_fallback( + x, y, adj_x=adj_x, adj_y=adj_y, grad_x=grad_x, grad_y=grad_y, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, grad_x=grad_x, + grad_y=grad_y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "adj_x", + _op._get_attr_bool("adj_x"), "adj_y", + _op._get_attr_bool("adj_y"), "grad_x", + _op._get_attr_bool("grad_x"), "grad_y", + _op._get_attr_bool("grad_y")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BatchMatMul = tf_export("raw_ops.BatchMatMul")(_ops.to_raw_op(batch_mat_mul)) + + +def batch_mat_mul_eager_fallback(x: Annotated[Any, TV_BatchMatMul_T], y: Annotated[Any, TV_BatchMatMul_T], adj_x: bool, adj_y: bool, grad_x: bool, grad_y: bool, name, ctx) -> Annotated[Any, TV_BatchMatMul_T]: + if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T, "adj_x", adj_x, "adj_y", adj_y, "grad_x", grad_x, + "grad_y", grad_y) + _result = _execute.execute(b"BatchMatMul", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BatchMatMulV2_T = TypeVar("TV_BatchMatMulV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, 
_atypes.UInt8) + +def batch_mat_mul_v2(x: Annotated[Any, TV_BatchMatMulV2_T], y: Annotated[Any, TV_BatchMatMulV2_T], adj_x:bool=False, adj_y:bool=False, grad_x:bool=False, grad_y:bool=False, name=None) -> Annotated[Any, TV_BatchMatMulV2_T]: + r"""Multiplies slices of two tensors in batches. + + Multiplies all slices of `Tensor` `x` and `y` (each slice can be + viewed as an element of a batch), and arranges the individual results + in a single output tensor of the same batch size. Each of the + individual slices can optionally be adjointed (to adjoint a matrix + means to transpose and conjugate it) before multiplication by setting + the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + + The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + and `[..., r_y, c_y]`. + + The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + + r_o = c_x if adj_x else r_x + c_o = r_y if adj_y else c_y + + It is computed as: + + output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + + *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More + about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`. + 2-D or higher with shape `[..., r_x, c_x]`. + y: A `Tensor`. Must have the same type as `x`. + 2-D or higher with shape `[..., r_y, c_y]`. + adj_x: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `x`. Defaults to `False`. + adj_y: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `y`. Defaults to `False`. + grad_x: An optional `bool`. Defaults to `False`. + grad_y: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchMatMulV2", name, x, y, "adj_x", adj_x, "adj_y", adj_y, + "grad_x", grad_x, "grad_y", grad_y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_mat_mul_v2_eager_fallback( + x, y, adj_x=adj_x, adj_y=adj_y, grad_x=grad_x, grad_y=grad_y, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
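+  # Illustrative note (editorial, not part of the generated wrapper): with the
+  # batch-dimension broadcasting described in the docstring, a rank-3 `x` can be
+  # multiplied by a rank-2 `y`, e.g.
+  #   a = tf.random.normal([2, 3, 5])
+  #   b = tf.random.normal([5, 7])
+  #   tf.raw_ops.BatchMatMulV2(x=a, y=b).shape  # TensorShape([2, 3, 7])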
+ if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchMatMulV2", x=x, y=y, adj_x=adj_x, adj_y=adj_y, grad_x=grad_x, + grad_y=grad_y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "adj_x", + _op._get_attr_bool("adj_x"), "adj_y", + _op._get_attr_bool("adj_y"), "grad_x", + _op._get_attr_bool("grad_x"), "grad_y", + _op._get_attr_bool("grad_y")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchMatMulV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BatchMatMulV2 = tf_export("raw_ops.BatchMatMulV2")(_ops.to_raw_op(batch_mat_mul_v2)) + + +def batch_mat_mul_v2_eager_fallback(x: Annotated[Any, TV_BatchMatMulV2_T], y: Annotated[Any, TV_BatchMatMulV2_T], adj_x: bool, adj_y: bool, grad_x: bool, grad_y: bool, name, ctx) -> Annotated[Any, TV_BatchMatMulV2_T]: + if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T, "adj_x", adj_x, "adj_y", adj_y, "grad_x", grad_x, + "grad_y", grad_y) + _result = _execute.execute(b"BatchMatMulV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchMatMulV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_BatchMatMulV3_Ta = TypeVar("TV_BatchMatMulV3_Ta", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt8) +TV_BatchMatMulV3_Tb = TypeVar("TV_BatchMatMulV3_Tb", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt8) +TV_BatchMatMulV3_Tout = TypeVar("TV_BatchMatMulV3_Tout", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64) + +def batch_mat_mul_v3(x: Annotated[Any, TV_BatchMatMulV3_Ta], y: Annotated[Any, TV_BatchMatMulV3_Tb], Tout: TV_BatchMatMulV3_Tout, adj_x:bool=False, adj_y:bool=False, grad_x:bool=False, grad_y:bool=False, name=None) -> Annotated[Any, TV_BatchMatMulV3_Tout]: + r"""Multiplies slices of two tensors in batches. + + Multiplies all slices of `Tensor` `x` and `y` (each slice can be + viewed as an element of a batch), and arranges the individual results + in a single output tensor of the same batch size. 
Each of the + individual slices can optionally be adjointed (to adjoint a matrix + means to transpose and conjugate it) before multiplication by setting + the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + + The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + and `[..., r_y, c_y]`. + + The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + + r_o = c_x if adj_x else r_x + c_o = r_y if adj_y else c_y + + It is computed as: + + output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + + *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More + about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + 2-D or higher with shape `[..., r_x, c_x]`. + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + 2-D or higher with shape `[..., r_y, c_y]`. + Tout: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int16, tf.int32, tf.int64, tf.complex64, tf.complex128`. + If not spcified, Tout is the same type to input type. + adj_x: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `x`. Defaults to `False`. + adj_y: An optional `bool`. Defaults to `False`. + If `True`, adjoint the slices of `y`. Defaults to `False`. + grad_x: An optional `bool`. Defaults to `False`. + grad_y: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BatchMatMulV3", name, x, y, "Tout", Tout, "adj_x", adj_x, + "adj_y", adj_y, "grad_x", grad_x, "grad_y", grad_y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return batch_mat_mul_v3_eager_fallback( + x, y, Tout=Tout, adj_x=adj_x, adj_y=adj_y, grad_x=grad_x, + grad_y=grad_y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
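+  # Illustrative note (editorial, not part of the generated wrapper): `Tout` lets
+  # the output dtype differ from the input dtypes, e.g. accumulating int8 matrices
+  # into int32 (assuming the kernel supports this dtype combination on your build):
+  #   a = tf.constant([[1, 2]], dtype=tf.int8)
+  #   b = tf.constant([[3], [4]], dtype=tf.int8)
+  #   tf.raw_ops.BatchMatMulV3(x=a, y=b, Tout=tf.int32)  # [[11]], dtype int32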
+ Tout = _execute.make_type(Tout, "Tout") + if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BatchMatMulV3", x=x, y=y, Tout=Tout, adj_x=adj_x, adj_y=adj_y, + grad_x=grad_x, grad_y=grad_y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Ta", _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb"), + "Tout", _op._get_attr_type("Tout"), "adj_x", + _op._get_attr_bool("adj_x"), "adj_y", + _op._get_attr_bool("adj_y"), "grad_x", + _op._get_attr_bool("grad_x"), "grad_y", + _op._get_attr_bool("grad_y")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BatchMatMulV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BatchMatMulV3 = tf_export("raw_ops.BatchMatMulV3")(_ops.to_raw_op(batch_mat_mul_v3)) + + +def batch_mat_mul_v3_eager_fallback(x: Annotated[Any, TV_BatchMatMulV3_Ta], y: Annotated[Any, TV_BatchMatMulV3_Tb], Tout: TV_BatchMatMulV3_Tout, adj_x: bool, adj_y: bool, grad_x: bool, grad_y: bool, name, ctx) -> Annotated[Any, TV_BatchMatMulV3_Tout]: + Tout = _execute.make_type(Tout, "Tout") + if adj_x is None: + adj_x = False + adj_x = _execute.make_bool(adj_x, "adj_x") + if adj_y is None: + adj_y = False + adj_y = _execute.make_bool(adj_y, "adj_y") + if grad_x is None: + grad_x = False + grad_x = _execute.make_bool(grad_x, "grad_x") + if grad_y is None: + grad_y = False + grad_y = _execute.make_bool(grad_y, "grad_y") + _attr_Ta, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _attr_Tb, (y,) = _execute.args_to_matching_eager([y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x, y] + _attrs = ("Ta", _attr_Ta, "Tb", _attr_Tb, "Tout", Tout, "adj_x", adj_x, + "adj_y", adj_y, "grad_x", grad_x, "grad_y", grad_y) + _result = _execute.execute(b"BatchMatMulV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BatchMatMulV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Betainc_T = TypeVar("TV_Betainc_T", _atypes.Float32, _atypes.Float64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.betainc', v1=['math.betainc', 'betainc']) +@deprecated_endpoints('betainc') +def betainc(a: Annotated[Any, TV_Betainc_T], b: Annotated[Any, TV_Betainc_T], x: Annotated[Any, TV_Betainc_T], name=None) -> Annotated[Any, TV_Betainc_T]: + r"""Compute the regularized incomplete beta integral \\(I_x(a, b)\\). + + The regularized incomplete beta integral is defined as: + + + \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) + + where + + + \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) + + + is the incomplete beta function and \\(B(a, b)\\) is the *complete* + beta function. + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `float64`. + b: A `Tensor`. Must have the same type as `a`. + x: A `Tensor`. 
Must have the same type as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Betainc", name, a, b, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_betainc( + (a, b, x, name,), None) + if _result is not NotImplemented: + return _result + return betainc_eager_fallback( + a, b, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + betainc, (), dict(a=a, b=b, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_betainc( + (a, b, x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Betainc", a=a, b=b, x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + betainc, (), dict(a=a, b=b, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Betainc", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Betainc = tf_export("raw_ops.Betainc")(_ops.to_raw_op(betainc)) +_dispatcher_for_betainc = betainc._tf_type_based_dispatcher.Dispatch + + +def betainc_eager_fallback(a: Annotated[Any, TV_Betainc_T], b: Annotated[Any, TV_Betainc_T], x: Annotated[Any, TV_Betainc_T], name, ctx) -> Annotated[Any, TV_Betainc_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b, x], ctx, [_dtypes.float32, _dtypes.float64, ]) + (a, b, x) = _inputs_T + _inputs_flat = [a, b, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Betainc", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Betainc", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Bincount_T = TypeVar("TV_Bincount_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def bincount(arr: Annotated[Any, _atypes.Int32], size: Annotated[Any, _atypes.Int32], weights: Annotated[Any, TV_Bincount_T], name=None) -> Annotated[Any, TV_Bincount_T]: + r"""Counts the number of occurrences of each value in an integer array. + + Outputs a vector with length `size` and the same dtype as `weights`. If + `weights` are empty, then index `i` stores the number of times the value `i` is + counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + the value in `weights` at each index where the corresponding value in `arr` is + `i`. + + Values in `arr` outside of the range [0, size) are ignored. + + Args: + arr: A `Tensor` of type `int32`. int32 `Tensor`. + size: A `Tensor` of type `int32`. non-negative int32 scalar `Tensor`. + weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + is an int32, int64, float32, or float64 `Tensor` with the same + shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + equal to 1. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `weights`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Bincount", name, arr, size, weights) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bincount_eager_fallback( + arr, size, weights, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Bincount", arr=arr, size=size, weights=weights, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Bincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Bincount = tf_export("raw_ops.Bincount")(_ops.to_raw_op(bincount)) + + +def bincount_eager_fallback(arr: Annotated[Any, _atypes.Int32], size: Annotated[Any, _atypes.Int32], weights: Annotated[Any, TV_Bincount_T], name, ctx) -> Annotated[Any, TV_Bincount_T]: + _attr_T, (weights,) = _execute.args_to_matching_eager([weights], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + arr = _ops.convert_to_tensor(arr, _dtypes.int32) + size = _ops.convert_to_tensor(size, _dtypes.int32) + _inputs_flat = [arr, size, weights] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Bincount", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Bincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Bucketize_T = TypeVar("TV_Bucketize_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def bucketize(input: Annotated[Any, TV_Bucketize_T], boundaries, name=None) -> Annotated[Any, _atypes.Int32]: + r"""Bucketizes 'input' based on 'boundaries'. + + For example, if the inputs are + boundaries = [0, 10, 100] + input = [[-5, 10000] + [150, 10] + [5, 100]] + + then the output will be + output = [[0, 3] + [3, 2] + [1, 3]] + + Args: + input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + Any shape of Tensor contains with int or float type. + boundaries: A list of `floats`. + A sorted list of floats gives the boundary of the buckets. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Bucketize", name, input, "boundaries", boundaries) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bucketize_eager_fallback( + input, boundaries=boundaries, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'boundaries' argument to " + "'bucketize' Op, not %r." 
% boundaries) + boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Bucketize", input=input, boundaries=boundaries, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "boundaries", + _op.get_attr("boundaries")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Bucketize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Bucketize = tf_export("raw_ops.Bucketize")(_ops.to_raw_op(bucketize)) + + +def bucketize_eager_fallback(input: Annotated[Any, TV_Bucketize_T], boundaries, name, ctx) -> Annotated[Any, _atypes.Int32]: + if not isinstance(boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'boundaries' argument to " + "'bucketize' Op, not %r." % boundaries) + boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries] + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "boundaries", boundaries) + _result = _execute.execute(b"Bucketize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Bucketize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cast_SrcT = TypeVar("TV_Cast_SrcT", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_Cast_DstT = TypeVar("TV_Cast_DstT", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def cast(x: Annotated[Any, TV_Cast_SrcT], DstT: TV_Cast_DstT, Truncate:bool=False, name=None) -> Annotated[Any, TV_Cast_DstT]: + r"""Cast x of type SrcT to y of DstT. + + Args: + x: A `Tensor`. + DstT: A `tf.DType`. + Truncate: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `DstT`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cast", name, x, "DstT", DstT, "Truncate", Truncate) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cast_eager_fallback( + x, DstT=DstT, Truncate=Truncate, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
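+  # Illustrative note (editorial, not part of the generated wrapper): casting a
+  # float tensor to an integer dtype drops the fractional part; the `Truncate`
+  # attribute is left at its default here:
+  #   tf.raw_ops.Cast(x=tf.constant([1.8, -1.8]), DstT=tf.int32)  # [1, -1]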
+ DstT = _execute.make_type(DstT, "DstT") + if Truncate is None: + Truncate = False + Truncate = _execute.make_bool(Truncate, "Truncate") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cast", x=x, DstT=DstT, Truncate=Truncate, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("SrcT", _op._get_attr_type("SrcT"), "DstT", + _op._get_attr_type("DstT"), "Truncate", + _op._get_attr_bool("Truncate")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cast", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cast = tf_export("raw_ops.Cast")(_ops.to_raw_op(cast)) + + +def cast_eager_fallback(x: Annotated[Any, TV_Cast_SrcT], DstT: TV_Cast_DstT, Truncate: bool, name, ctx) -> Annotated[Any, TV_Cast_DstT]: + DstT = _execute.make_type(DstT, "DstT") + if Truncate is None: + Truncate = False + Truncate = _execute.make_bool(Truncate, "Truncate") + _attr_SrcT, (x,) = _execute.args_to_matching_eager([x], ctx, []) + _inputs_flat = [x] + _attrs = ("SrcT", _attr_SrcT, "DstT", DstT, "Truncate", Truncate) + _result = _execute.execute(b"Cast", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cast", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Ceil_T = TypeVar("TV_Ceil_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def ceil(x: Annotated[Any, TV_Ceil_T], name=None) -> Annotated[Any, TV_Ceil_T]: + r"""Returns element-wise smallest integer not less than x. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Ceil", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ceil_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
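+  # Illustrative note (editorial): element-wise ceiling, e.g.
+  #   tf.math.ceil(tf.constant([-1.7, 2.0, 2.1]))  # [-1., 2., 3.]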
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Ceil", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Ceil", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Ceil = tf_export("raw_ops.Ceil")(_ops.to_raw_op(ceil)) + + +def ceil_eager_fallback(x: Annotated[Any, TV_Ceil_T], name, ctx) -> Annotated[Any, TV_Ceil_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Ceil", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Ceil", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ClipByValue_T = TypeVar("TV_ClipByValue_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def _clip_by_value(t: Annotated[Any, TV_ClipByValue_T], clip_value_min: Annotated[Any, TV_ClipByValue_T], clip_value_max: Annotated[Any, TV_ClipByValue_T], name=None) -> Annotated[Any, TV_ClipByValue_T]: + r"""Clips tensor values to a specified min and max. + + Given a tensor `t`, this operation returns a tensor of the same type and + shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + Any values less than `clip_value_min` are set to `clip_value_min`. Any values + greater than `clip_value_max` are set to `clip_value_max`. + + Args: + t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + A `Tensor`. + clip_value_min: A `Tensor`. Must have the same type as `t`. + A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + as `t`. The minimum value to clip by. + clip_value_max: A `Tensor`. Must have the same type as `t`. + A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + as `t`. The maximum value to clip by. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `t`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ClipByValue", name, t, clip_value_min, clip_value_max) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _clip_by_value_eager_fallback( + t, clip_value_min, clip_value_max, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
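+  # Illustrative note (editorial, not part of the generated wrapper): values are
+  # clamped into [clip_value_min, clip_value_max], e.g.
+  #   tf.raw_ops.ClipByValue(t=tf.constant([-2.0, 0.5, 5.0]),
+  #                          clip_value_min=0.0, clip_value_max=3.0)  # [0.0, 0.5, 3.0]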
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ClipByValue", t=t, clip_value_min=clip_value_min, + clip_value_max=clip_value_max, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ClipByValue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ClipByValue = tf_export("raw_ops.ClipByValue")(_ops.to_raw_op(_clip_by_value)) + + +def _clip_by_value_eager_fallback(t: Annotated[Any, TV_ClipByValue_T], clip_value_min: Annotated[Any, TV_ClipByValue_T], clip_value_max: Annotated[Any, TV_ClipByValue_T], name, ctx) -> Annotated[Any, TV_ClipByValue_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([t, clip_value_min, clip_value_max], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (t, clip_value_min, clip_value_max) = _inputs_T + _inputs_flat = [t, clip_value_min, clip_value_max] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"ClipByValue", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ClipByValue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Complex_T = TypeVar("TV_Complex_T", _atypes.Float32, _atypes.Float64) +TV_Complex_Tout = TypeVar("TV_Complex_Tout", _atypes.Complex128, _atypes.Complex64) + +def _complex(real: Annotated[Any, TV_Complex_T], imag: Annotated[Any, TV_Complex_T], Tout:TV_Complex_Tout=_dtypes.complex64, name=None) -> Annotated[Any, TV_Complex_Tout]: + r"""Converts two real numbers to a complex number. + + Given a tensor `real` representing the real part of a complex number, and a + tensor `imag` representing the imaginary part of a complex number, this + operation returns complex numbers elementwise of the form \\(a + bj\\), where + *a* represents the `real` part and *b* represents the `imag` part. + + The input tensors `real` and `imag` must have the same shape. + + For example: + + ``` + # tensor 'real' is [2.25, 3.25] + # tensor `imag` is [4.75, 5.75] + tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + ``` + + Args: + real: A `Tensor`. Must be one of the following types: `float32`, `float64`. + imag: A `Tensor`. Must have the same type as `real`. + Tout: An optional `tf.DType` from: `tf.complex64, tf.complex128`. Defaults to `tf.complex64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Complex", name, real, imag, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _complex_eager_fallback( + real, imag, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if Tout is None: + Tout = _dtypes.complex64 + Tout = _execute.make_type(Tout, "Tout") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Complex", real=real, imag=imag, Tout=Tout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tout", + _op._get_attr_type("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Complex", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Complex = tf_export("raw_ops.Complex")(_ops.to_raw_op(_complex)) + + +def _complex_eager_fallback(real: Annotated[Any, TV_Complex_T], imag: Annotated[Any, TV_Complex_T], Tout: TV_Complex_Tout, name, ctx) -> Annotated[Any, TV_Complex_Tout]: + if Tout is None: + Tout = _dtypes.complex64 + Tout = _execute.make_type(Tout, "Tout") + _attr_T, _inputs_T = _execute.args_to_matching_eager([real, imag], ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + (real, imag) = _inputs_T + _inputs_flat = [real, imag] + _attrs = ("T", _attr_T, "Tout", Tout) + _result = _execute.execute(b"Complex", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Complex", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ComplexAbs_T = TypeVar("TV_ComplexAbs_T", _atypes.Complex128, _atypes.Complex64) +TV_ComplexAbs_Tout = TypeVar("TV_ComplexAbs_Tout", _atypes.Float32, _atypes.Float64) + +def complex_abs(x: Annotated[Any, TV_ComplexAbs_T], Tout:TV_ComplexAbs_Tout=_dtypes.float32, name=None) -> Annotated[Any, TV_ComplexAbs_Tout]: + r"""Computes the complex absolute value of a tensor. + + Given a tensor `x` of complex numbers, this operation returns a tensor of type + `float` or `double` that is the absolute value of each element in `x`. All + elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute + value is computed as \\( \sqrt{a^2 + b^2}\\). + + For example: + + >>> x = tf.complex(3.0, 4.0) + >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + 5.0 + + Args: + x: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. + Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ComplexAbs", name, x, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return complex_abs_eager_fallback( + x, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ComplexAbs", x=x, Tout=Tout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tout", + _op._get_attr_type("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ComplexAbs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ComplexAbs = tf_export("raw_ops.ComplexAbs")(_ops.to_raw_op(complex_abs)) + + +def complex_abs_eager_fallback(x: Annotated[Any, TV_ComplexAbs_T], Tout: TV_ComplexAbs_Tout, name, ctx) -> Annotated[Any, TV_ComplexAbs_Tout]: + if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.complex64, _dtypes.complex128, ], _dtypes.complex64) + _inputs_flat = [x] + _attrs = ("T", _attr_T, "Tout", Tout) + _result = _execute.execute(b"ComplexAbs", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ComplexAbs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Conj_T = TypeVar("TV_Conj_T", _atypes.Complex128, _atypes.Complex64, _atypes.Variant) + +def conj(input: Annotated[Any, TV_Conj_T], name=None) -> Annotated[Any, TV_Conj_T]: + r"""Returns the complex conjugate of a complex number. + + Given a tensor `input` of complex numbers, this operation returns a tensor of + complex numbers that are the complex conjugate of each element in `input`. The + complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the + real part and *b* is the imaginary part. + + The complex conjugate returned by this operation is of the form \\(a - bj\\). + + For example: + + ``` + # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`, `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Conj", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return conj_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Conj", input=input, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Conj", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Conj = tf_export("raw_ops.Conj")(_ops.to_raw_op(conj)) + + +def conj_eager_fallback(input: Annotated[Any, TV_Conj_T], name, ctx) -> Annotated[Any, TV_Conj_T]: + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.complex64, _dtypes.complex128, _dtypes.variant, ], _dtypes.complex64) + _inputs_flat = [input] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Conj", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Conj", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cos_T = TypeVar("TV_Cos_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.cos', 'cos') +def cos(x: Annotated[Any, TV_Cos_T], name=None) -> Annotated[Any, TV_Cos_T]: + r"""Computes cos of x element-wise. + + Given an input tensor, this function computes cosine of every + element in the tensor. Input range is `(-inf, inf)` and + output range is `[-1,1]`. If input lies outside the boundary, `nan` + is returned. + + ```python + x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cos", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_cos( + (x, name,), None) + if _result is not NotImplemented: + return _result + return cos_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cos, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_cos( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cos", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cos, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cos = tf_export("raw_ops.Cos")(_ops.to_raw_op(cos)) +_dispatcher_for_cos = cos._tf_type_based_dispatcher.Dispatch + + +def cos_eager_fallback(x: Annotated[Any, TV_Cos_T], name, ctx) -> Annotated[Any, TV_Cos_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Cos", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cos", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cosh_T = TypeVar("TV_Cosh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.cosh', 'cosh') +def cosh(x: Annotated[Any, TV_Cosh_T], name=None) -> Annotated[Any, TV_Cosh_T]: + r"""Computes hyperbolic cosine of x element-wise. + + Given an input tensor, this function computes hyperbolic cosine of every + element in the tensor. Input range is `[-inf, inf]` and output range + is `[1, inf]`. + + ```python + x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cosh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_cosh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return cosh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cosh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_cosh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cosh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cosh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cosh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cosh = tf_export("raw_ops.Cosh")(_ops.to_raw_op(cosh)) +_dispatcher_for_cosh = cosh._tf_type_based_dispatcher.Dispatch + + +def cosh_eager_fallback(x: Annotated[Any, TV_Cosh_T], name, ctx) -> Annotated[Any, TV_Cosh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Cosh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cosh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cross_T = TypeVar("TV_Cross_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('linalg.cross', v1=['linalg.cross', 'cross']) +@deprecated_endpoints('cross') +def cross(a: Annotated[Any, TV_Cross_T], b: Annotated[Any, TV_Cross_T], name=None) -> Annotated[Any, TV_Cross_T]: + r"""Compute the pairwise cross product. + + `a` and `b` must be the same shape; they can either be simple 3-element vectors, + or any shape where the innermost dimension is 3. In the latter case, each pair + of corresponding 3-element vectors is cross-multiplied independently. + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + A tensor containing 3-element vectors. + b: A `Tensor`. Must have the same type as `a`. + Another tensor, of same type and shape as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cross", name, a, b) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_cross( + (a, b, name,), None) + if _result is not NotImplemented: + return _result + return cross_eager_fallback( + a, b, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cross, (), dict(a=a, b=b, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_cross( + (a, b, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
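+ # Illustrative usage (a sketch, assuming TF 2.x eager execution): the public entry point is `tf.linalg.cross`; crossing the x and y unit vectors gives the z unit vector.
+ #   >>> tf.linalg.cross([1., 0., 0.], [0., 1., 0.]).numpy()
+ #   array([0., 0., 1.], dtype=float32)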
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cross", a=a, b=b, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + cross, (), dict(a=a, b=b, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cross", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cross = tf_export("raw_ops.Cross")(_ops.to_raw_op(cross)) +_dispatcher_for_cross = cross._tf_type_based_dispatcher.Dispatch + + +def cross_eager_fallback(a: Annotated[Any, TV_Cross_T], b: Annotated[Any, TV_Cross_T], name, ctx) -> Annotated[Any, TV_Cross_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (a, b) = _inputs_T + _inputs_flat = [a, b] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Cross", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cross", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cumprod_T = TypeVar("TV_Cumprod_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Cumprod_Tidx = TypeVar("TV_Cumprod_Tidx", _atypes.Int32, _atypes.Int64) + +def cumprod(x: Annotated[Any, TV_Cumprod_T], axis: Annotated[Any, TV_Cumprod_Tidx], exclusive:bool=False, reverse:bool=False, name=None) -> Annotated[Any, TV_Cumprod_T]: + r"""Compute the cumulative product of the tensor `x` along `axis`. + + By default, this op performs an inclusive cumprod, which means that the first + element of the input is identical to the first element of the output: + + ```python + tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + ``` + + By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + performed instead: + + ```python + tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + ``` + + By setting the `reverse` kwarg to `True`, the cumprod is performed in the + opposite direction: + + ```python + tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + ``` + + This is more efficient than using separate `tf.reverse` ops. + + The `reverse` and `exclusive` kwargs can also be combined: + + ```python + tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + `complex128`, `qint8`, `quint8`, `qint32`, `half`. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A `Tensor` of type `int32` (default: 0). Must be in the range + `[-rank(x), rank(x))`. + exclusive: An optional `bool`. Defaults to `False`. + If `True`, perform exclusive cumprod. 
+ reverse: An optional `bool`. Defaults to `False`. + A `bool` (default: False). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cumprod", name, x, axis, "exclusive", exclusive, "reverse", + reverse) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cumprod_eager_fallback( + x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cumprod", x=x, axis=axis, exclusive=exclusive, reverse=reverse, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("exclusive", _op._get_attr_bool("exclusive"), "reverse", + _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), + "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cumprod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cumprod = tf_export("raw_ops.Cumprod")(_ops.to_raw_op(cumprod)) + + +def cumprod_eager_fallback(x: Annotated[Any, TV_Cumprod_T], axis: Annotated[Any, TV_Cumprod_Tidx], exclusive: bool, reverse: bool, name, ctx) -> Annotated[Any, TV_Cumprod_T]: + if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [x, axis] + _attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx", + _attr_Tidx) + _result = _execute.execute(b"Cumprod", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cumprod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Cumsum_T = TypeVar("TV_Cumsum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Cumsum_Tidx = TypeVar("TV_Cumsum_Tidx", _atypes.Int32, _atypes.Int64) + +def cumsum(x: Annotated[Any, TV_Cumsum_T], axis: Annotated[Any, TV_Cumsum_Tidx], exclusive:bool=False, reverse:bool=False, name=None) -> Annotated[Any, TV_Cumsum_T]: + r"""Compute the cumulative sum of the tensor `x` along `axis`. 
+ + By default, this op performs an inclusive cumsum, which means that the first + element of the input is identical to the first element of the output: + + ```python + tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + ``` + + By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + performed instead: + + ```python + tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + ``` + + By setting the `reverse` kwarg to `True`, the cumsum is performed in the + opposite direction: + + ```python + tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + ``` + + This is more efficient than using separate `tf.reverse` ops. + + The `reverse` and `exclusive` kwargs can also be combined: + + ```python + tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + A `Tensor`. Must be one of the following types: `float32`, `float64`, + `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + `complex128`, `qint8`, `quint8`, `qint32`, `half`. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A `Tensor` of type `int32` (default: 0). Must be in the range + `[-rank(x), rank(x))`. + exclusive: An optional `bool`. Defaults to `False`. + If `True`, perform exclusive cumsum. + reverse: An optional `bool`. Defaults to `False`. + A `bool` (default: False). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Cumsum", name, x, axis, "exclusive", exclusive, "reverse", + reverse) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cumsum_eager_fallback( + x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
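+ # Illustrative usage (a sketch, assuming TF 2.x eager execution), making the symbolic docstring examples concrete:
+ #   >>> tf.cumsum([1, 2, 3]).numpy()                  # array([1, 3, 6], dtype=int32)
+ #   >>> tf.cumsum([1, 2, 3], exclusive=True).numpy()  # array([0, 1, 3], dtype=int32)
+ #   >>> tf.cumsum([1, 2, 3], reverse=True).numpy()    # array([6, 5, 3], dtype=int32)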
+ if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Cumsum", x=x, axis=axis, exclusive=exclusive, reverse=reverse, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("exclusive", _op._get_attr_bool("exclusive"), "reverse", + _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), + "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Cumsum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Cumsum = tf_export("raw_ops.Cumsum")(_ops.to_raw_op(cumsum)) + + +def cumsum_eager_fallback(x: Annotated[Any, TV_Cumsum_T], axis: Annotated[Any, TV_Cumsum_Tidx], exclusive: bool, reverse: bool, name, ctx) -> Annotated[Any, TV_Cumsum_T]: + if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [x, axis] + _attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx", + _attr_Tidx) + _result = _execute.execute(b"Cumsum", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Cumsum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_CumulativeLogsumexp_T = TypeVar("TV_CumulativeLogsumexp_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_CumulativeLogsumexp_Tidx = TypeVar("TV_CumulativeLogsumexp_Tidx", _atypes.Int32, _atypes.Int64) + +def cumulative_logsumexp(x: Annotated[Any, TV_CumulativeLogsumexp_T], axis: Annotated[Any, TV_CumulativeLogsumexp_Tidx], exclusive:bool=False, reverse:bool=False, name=None) -> Annotated[Any, TV_CumulativeLogsumexp_T]: + r"""Compute the cumulative log-sum-exp of the tensor `x` along `axis`. + + By default, this op performs an inclusive cumulative log-sum-exp, + which means that the first + element of the input is identical to the first element of the output: + ```python + tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] + ``` + + By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is + performed instead: + ```python + tf.math.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) + exp(b))] + ``` + Note that the neutral element of the log-sum-exp operation is `-inf`, + however, for performance reasons, the minimal value representable by the + floating point type is used instead. + + By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the + opposite direction. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`. + axis: A `Tensor`.
Must be one of the following types: `int32`, `int64`. + A `Tensor` of type `int32` (default: 0). Must be in the range + `[-rank(x), rank(x))`. + exclusive: An optional `bool`. Defaults to `False`. + If `True`, perform exclusive cumulative log-sum-exp. + reverse: An optional `bool`. Defaults to `False`. + A `bool` (default: False). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CumulativeLogsumexp", name, x, axis, "exclusive", exclusive, + "reverse", reverse) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cumulative_logsumexp_eager_fallback( + x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CumulativeLogsumexp", x=x, axis=axis, exclusive=exclusive, + reverse=reverse, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("exclusive", _op._get_attr_bool("exclusive"), "reverse", + _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), + "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CumulativeLogsumexp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CumulativeLogsumexp = tf_export("raw_ops.CumulativeLogsumexp")(_ops.to_raw_op(cumulative_logsumexp)) + + +def cumulative_logsumexp_eager_fallback(x: Annotated[Any, TV_CumulativeLogsumexp_T], axis: Annotated[Any, TV_CumulativeLogsumexp_Tidx], exclusive: bool, reverse: bool, name, ctx) -> Annotated[Any, TV_CumulativeLogsumexp_T]: + if exclusive is None: + exclusive = False + exclusive = _execute.make_bool(exclusive, "exclusive") + if reverse is None: + reverse = False + reverse = _execute.make_bool(reverse, "reverse") + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [x, axis] + _attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx", + _attr_Tidx) + _result = _execute.execute(b"CumulativeLogsumexp", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CumulativeLogsumexp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_DenseBincount_Tidx = TypeVar("TV_DenseBincount_Tidx", _atypes.Int32, _atypes.Int64) +TV_DenseBincount_T = TypeVar("TV_DenseBincount_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def dense_bincount(input: Annotated[Any, TV_DenseBincount_Tidx], size: Annotated[Any, TV_DenseBincount_Tidx], weights: Annotated[Any, TV_DenseBincount_T], binary_output:bool=False, name=None) -> Annotated[Any, TV_DenseBincount_T]: + r"""Counts the number of occurrences of each value in an integer array. + + Outputs a vector with length `size` and the same dtype as `weights`. 
If + `weights` are empty, then index `i` stores the number of times the value `i` is + counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + the value in `weights` at each index where the corresponding value in `arr` is + `i`. + + Values in `arr` outside of the range [0, size) are ignored. + + Args: + input: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 1D or 2D int `Tensor`. + size: A `Tensor`. Must have the same type as `input`. + non-negative int scalar `Tensor`. + weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + is an int32, int64, float32, or float64 `Tensor` with the same + shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + equal to 1. + binary_output: An optional `bool`. Defaults to `False`. + bool; Whether the kernel should count the appearance or number of occurrences. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `weights`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DenseBincount", name, input, size, weights, "binary_output", + binary_output) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dense_bincount_eager_fallback( + input, size, weights, binary_output=binary_output, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DenseBincount", input=input, size=size, weights=weights, + binary_output=binary_output, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tidx", _op._get_attr_type("Tidx"), "T", + _op._get_attr_type("T"), "binary_output", + _op._get_attr_bool("binary_output")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DenseBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DenseBincount = tf_export("raw_ops.DenseBincount")(_ops.to_raw_op(dense_bincount)) + + +def dense_bincount_eager_fallback(input: Annotated[Any, TV_DenseBincount_Tidx], size: Annotated[Any, TV_DenseBincount_Tidx], weights: Annotated[Any, TV_DenseBincount_T], binary_output: bool, name, ctx) -> Annotated[Any, TV_DenseBincount_T]: + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([input, size], ctx, [_dtypes.int32, _dtypes.int64, ]) + (input, size) = _inputs_Tidx + _attr_T, (weights,) = _execute.args_to_matching_eager([weights], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [input, size, weights] + _attrs = ("Tidx", _attr_Tidx, "T", _attr_T, "binary_output", binary_output) + _result = _execute.execute(b"DenseBincount", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DenseBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Digamma_T = TypeVar("TV_Digamma_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list 
+@_dispatch.add_type_based_api_dispatcher +@tf_export('math.digamma', v1=['math.digamma', 'digamma']) +@deprecated_endpoints('digamma') +def digamma(x: Annotated[Any, TV_Digamma_T], name=None) -> Annotated[Any, TV_Digamma_T]: + r"""Computes Psi, the derivative of Lgamma (the log of the absolute value of + + `Gamma(x)`), element-wise. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Digamma", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_digamma( + (x, name,), None) + if _result is not NotImplemented: + return _result + return digamma_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + digamma, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_digamma( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Digamma", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + digamma, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Digamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Digamma = tf_export("raw_ops.Digamma")(_ops.to_raw_op(digamma)) +_dispatcher_for_digamma = digamma._tf_type_based_dispatcher.Dispatch + + +def digamma_eager_fallback(x: Annotated[Any, TV_Digamma_T], name, ctx) -> Annotated[Any, TV_Digamma_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Digamma", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Digamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Div_T = TypeVar("TV_Div_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def div(x: Annotated[Any, TV_Div_T], y: Annotated[Any, TV_Div_T], name=None) -> Annotated[Any, TV_Div_T]: + r"""Returns x / y element-wise. + + *NOTE*: `Div` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. 
Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Div", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return div_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Div", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Div", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Div = tf_export("raw_ops.Div")(_ops.to_raw_op(div)) + + +def div_eager_fallback(x: Annotated[Any, TV_Div_T], y: Annotated[Any, TV_Div_T], name, ctx) -> Annotated[Any, TV_Div_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.uint32, _dtypes.uint64, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Div", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Div", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_DivNoNan_T = TypeVar("TV_DivNoNan_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def div_no_nan(x: Annotated[Any, TV_DivNoNan_T], y: Annotated[Any, TV_DivNoNan_T], name=None) -> Annotated[Any, TV_DivNoNan_T]: + r"""Returns 0 if the denominator is zero. + + + *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `half`, `float32`, `bfloat16`, `float64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DivNoNan", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return div_no_nan_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
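+ # Illustrative usage (a sketch, assuming TF 2.x eager execution): a zero denominator yields 0 instead of inf or nan.
+ #   >>> tf.raw_ops.DivNoNan(x=tf.constant(3.0), y=tf.constant(0.0)).numpy()  # 0.0
+ #   >>> tf.raw_ops.DivNoNan(x=tf.constant(3.0), y=tf.constant(2.0)).numpy()  # 1.5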
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DivNoNan", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DivNoNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DivNoNan = tf_export("raw_ops.DivNoNan")(_ops.to_raw_op(div_no_nan)) + + +def div_no_nan_eager_fallback(x: Annotated[Any, TV_DivNoNan_T], y: Annotated[Any, TV_DivNoNan_T], name, ctx) -> Annotated[Any, TV_DivNoNan_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.half, _dtypes.float32, _dtypes.bfloat16, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"DivNoNan", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DivNoNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Equal_T = TypeVar("TV_Equal_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def equal(x: Annotated[Any, TV_Equal_T], y: Annotated[Any, TV_Equal_T], incompatible_shape_error:bool=True, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x == y) element-wise. + + *NOTE*: `Equal` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + ```python + x = tf.constant([2, 4]) + y = tf.constant(2) + tf.math.equal(x, y) ==> array([True, False]) + + x = tf.constant([2, 4]) + y = tf.constant([2, 4]) + tf.math.equal(x, y) ==> array([True, True]) + ``` + + Args: + x: A `Tensor`. + y: A `Tensor`. Must have the same type as `x`. + incompatible_shape_error: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Equal", name, x, y, "incompatible_shape_error", + incompatible_shape_error) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return equal_eager_fallback( + x, y, incompatible_shape_error=incompatible_shape_error, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if incompatible_shape_error is None: + incompatible_shape_error = True + incompatible_shape_error = _execute.make_bool(incompatible_shape_error, "incompatible_shape_error") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Equal", x=x, y=y, incompatible_shape_error=incompatible_shape_error, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "incompatible_shape_error", + _op._get_attr_bool("incompatible_shape_error")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Equal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Equal = tf_export("raw_ops.Equal")(_ops.to_raw_op(equal)) + + +def equal_eager_fallback(x: Annotated[Any, TV_Equal_T], y: Annotated[Any, TV_Equal_T], incompatible_shape_error: bool, name, ctx) -> Annotated[Any, _atypes.Bool]: + if incompatible_shape_error is None: + incompatible_shape_error = True + incompatible_shape_error = _execute.make_bool(incompatible_shape_error, "incompatible_shape_error") + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, []) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T, "incompatible_shape_error", + incompatible_shape_error) + _result = _execute.execute(b"Equal", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Equal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Erf_T = TypeVar("TV_Erf_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.erf', v1=['math.erf', 'erf']) +@deprecated_endpoints('erf') +def erf(x: Annotated[Any, TV_Erf_T], name=None) -> Annotated[Any, TV_Erf_T]: + r"""Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. + + For example: + + >>> tf.math.erf([[1.0, 2.0, 3.0], [0.0, -1.0, -2.0]]) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Erf", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_erf( + (x, name,), None) + if _result is not NotImplemented: + return _result + return erf_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + erf, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_erf( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
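+ # Illustrative usage (a sketch, assuming TF 2.x eager execution): erf(0) is 0 and erf saturates toward 1 for large positive inputs.
+ #   >>> tf.math.erf([0.0, 1.0]).numpy()  # approximately array([0., 0.8427], dtype=float32)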
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Erf", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + erf, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Erf", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Erf = tf_export("raw_ops.Erf")(_ops.to_raw_op(erf)) +_dispatcher_for_erf = erf._tf_type_based_dispatcher.Dispatch + + +def erf_eager_fallback(x: Annotated[Any, TV_Erf_T], name, ctx) -> Annotated[Any, TV_Erf_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Erf", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Erf", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Erfc_T = TypeVar("TV_Erfc_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.erfc', v1=['math.erfc', 'erfc']) +@deprecated_endpoints('erfc') +def erfc(x: Annotated[Any, TV_Erfc_T], name=None) -> Annotated[Any, TV_Erfc_T]: + r"""Computes the complementary error function of `x` element-wise. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Erfc", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_erfc( + (x, name,), None) + if _result is not NotImplemented: + return _result + return erfc_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + erfc, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_erfc( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Erfc", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + erfc, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Erfc", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Erfc = tf_export("raw_ops.Erfc")(_ops.to_raw_op(erfc)) +_dispatcher_for_erfc = erfc._tf_type_based_dispatcher.Dispatch + + +def erfc_eager_fallback(x: Annotated[Any, TV_Erfc_T], name, ctx) -> Annotated[Any, TV_Erfc_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Erfc", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Erfc", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Erfinv_T = TypeVar("TV_Erfinv_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def erfinv(x: Annotated[Any, TV_Erfinv_T], name=None) -> Annotated[Any, TV_Erfinv_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Erfinv", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return erfinv_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Erfinv", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Erfinv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Erfinv = tf_export("raw_ops.Erfinv")(_ops.to_raw_op(erfinv)) + + +def erfinv_eager_fallback(x: Annotated[Any, TV_Erfinv_T], name, ctx) -> Annotated[Any, TV_Erfinv_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Erfinv", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Erfinv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_EuclideanNorm_T = TypeVar("TV_EuclideanNorm_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_EuclideanNorm_Tidx = TypeVar("TV_EuclideanNorm_Tidx", _atypes.Int32, _atypes.Int64) + +def euclidean_norm(input: Annotated[Any, TV_EuclideanNorm_T], axis: Annotated[Any, TV_EuclideanNorm_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_EuclideanNorm_T]: + r"""Computes the euclidean norm of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EuclideanNorm", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return euclidean_norm_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
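+ # Illustrative usage (a sketch, assuming TF 2.x eager execution); the public wrapper around this op is `tf.math.reduce_euclidean_norm`.
+ #   >>> tf.raw_ops.EuclideanNorm(input=tf.constant([3., 4.]), axis=0).numpy()  # 5.0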
+ if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EuclideanNorm", input=input, reduction_indices=axis, + keep_dims=keep_dims, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "EuclideanNorm", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +EuclideanNorm = tf_export("raw_ops.EuclideanNorm")(_ops.to_raw_op(euclidean_norm)) + + +def euclidean_norm_eager_fallback(input: Annotated[Any, TV_EuclideanNorm_T], axis: Annotated[Any, TV_EuclideanNorm_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_EuclideanNorm_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"EuclideanNorm", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "EuclideanNorm", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Exp_T = TypeVar("TV_Exp_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def exp(x: Annotated[Any, TV_Exp_T], name=None) -> Annotated[Any, TV_Exp_T]: + r"""Computes exponential of x element-wise. \\(y = e^x\\). + + This function computes the exponential of every element in the input tensor. + i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + `e` denotes Euler's number and is approximately equal to 2.718281. + Output is positive for any real input. + + ```python + x = tf.constant(2.0) + tf.math.exp(x) ==> 7.389056 + + x = tf.constant([2.0, 8.0]) + tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + ``` + + For complex numbers, the exponential value is calculated as follows: + + ``` + e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + ``` + + Let's consider complex number 1+1j as an example. + e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + + ```python + x = tf.constant(1 + 1j) + tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Exp", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return exp_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Exp", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Exp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Exp = tf_export("raw_ops.Exp")(_ops.to_raw_op(exp)) + + +def exp_eager_fallback(x: Annotated[Any, TV_Exp_T], name, ctx) -> Annotated[Any, TV_Exp_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Exp", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Exp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Expm1_T = TypeVar("TV_Expm1_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.expm1', v1=['math.expm1', 'expm1']) +@deprecated_endpoints('expm1') +def expm1(x: Annotated[Any, TV_Expm1_T], name=None) -> Annotated[Any, TV_Expm1_T]: + r"""Computes `exp(x) - 1` element-wise. + + i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + `e` denotes Euler's number and is approximately equal to 2.718281. + + ```python + x = tf.constant(2.0) + tf.math.expm1(x) ==> 6.389056 + + x = tf.constant([2.0, 8.0]) + tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + + x = tf.constant(1 + 1j) + tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Expm1", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_expm1( + (x, name,), None) + if _result is not NotImplemented: + return _result + return expm1_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + expm1, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_expm1( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Expm1", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + expm1, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Expm1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Expm1 = tf_export("raw_ops.Expm1")(_ops.to_raw_op(expm1)) +_dispatcher_for_expm1 = expm1._tf_type_based_dispatcher.Dispatch + + +def expm1_eager_fallback(x: Annotated[Any, TV_Expm1_T], name, ctx) -> Annotated[Any, TV_Expm1_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Expm1", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Expm1", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Floor_T = TypeVar("TV_Floor_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def floor(x: Annotated[Any, TV_Floor_T], name=None) -> Annotated[Any, TV_Floor_T]: + r"""Returns element-wise largest integer not greater than x. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Floor", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return floor_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
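+ # Illustrative usage (a sketch, assuming TF 2.x eager execution): note the behaviour on negative inputs, "largest integer not greater than x".
+ #   >>> tf.math.floor([-1.5, 1.7]).numpy()  # array([-2., 1.], dtype=float32)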
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Floor", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Floor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Floor = tf_export("raw_ops.Floor")(_ops.to_raw_op(floor)) + + +def floor_eager_fallback(x: Annotated[Any, TV_Floor_T], name, ctx) -> Annotated[Any, TV_Floor_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Floor", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Floor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_FloorDiv_T = TypeVar("TV_FloorDiv_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export(v1=['floor_div']) +@deprecated_endpoints('floor_div') +def floor_div(x: Annotated[Any, TV_FloorDiv_T], y: Annotated[Any, TV_FloorDiv_T], name=None) -> Annotated[Any, TV_FloorDiv_T]: + r"""Returns x // y element-wise. + + *NOTE*: `floor_div` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FloorDiv", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_floor_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return floor_div_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + floor_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_floor_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
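A short usage sketch for the `Floor` and `FloorDiv` wrappers above, going through the public `tf.math` entry points that are assumed to dispatch to these ops (eager TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

print(tf.math.floor(tf.constant([1.7, -1.7])).numpy())    # [ 1. -2.]
# Floor division rounds toward negative infinity, matching Python's // operator.
print(tf.math.floordiv(tf.constant([7, -7]), 2).numpy())  # [ 3 -4]
```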
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FloorDiv", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + floor_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FloorDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FloorDiv = tf_export("raw_ops.FloorDiv")(_ops.to_raw_op(floor_div)) +_dispatcher_for_floor_div = floor_div._tf_type_based_dispatcher.Dispatch + + +def floor_div_eager_fallback(x: Annotated[Any, TV_FloorDiv_T], y: Annotated[Any, TV_FloorDiv_T], name, ctx) -> Annotated[Any, TV_FloorDiv_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.uint32, _dtypes.uint64, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"FloorDiv", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FloorDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_FloorMod_T = TypeVar("TV_FloorMod_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.floormod', 'math.mod', v1=['math.floormod', 'floormod', 'math.mod', 'mod']) +@deprecated_endpoints('floormod', 'mod') +def floor_mod(x: Annotated[Any, TV_FloorMod_T], y: Annotated[Any, TV_FloorMod_T], name=None) -> Annotated[Any, TV_FloorMod_T]: + r"""Returns element-wise remainder of division. + + This follows Python semantics in that the + result here is consistent with a flooring divide. E.g. + `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y. + + *NOTE*: `math.floormod` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `bfloat16`, `half`, `float32`, `float64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "FloorMod", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_floor_mod( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return floor_mod_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + floor_mod, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_floor_mod( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "FloorMod", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + floor_mod, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "FloorMod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +FloorMod = tf_export("raw_ops.FloorMod")(_ops.to_raw_op(floor_mod)) +_dispatcher_for_floor_mod = floor_mod._tf_type_based_dispatcher.Dispatch + + +def floor_mod_eager_fallback(x: Annotated[Any, TV_FloorMod_T], y: Annotated[Any, TV_FloorMod_T], name, ctx) -> Annotated[Any, TV_FloorMod_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, _dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"FloorMod", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "FloorMod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Greater_T = TypeVar("TV_Greater_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.greater', 'greater') +def greater(x: Annotated[Any, TV_Greater_T], y: Annotated[Any, TV_Greater_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x > y) element-wise. + + *NOTE*: `math.greater` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Example: + + ```python + x = tf.constant([5, 4, 6]) + y = tf.constant([5, 2, 5]) + tf.math.greater(x, y) ==> [False, True, True] + + x = tf.constant([5, 4, 6]) + y = tf.constant([5]) + tf.math.greater(x, y) ==> [False, False, True] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. 
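The flooring identity quoted in the `FloorMod` docstring can be checked directly. A minimal eager-mode sketch (TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

x = tf.constant([7, -7])
y = tf.constant([3, 3])
m = tf.math.floormod(x, y)
print(m.numpy())  # [1 2]
# Docstring identity: floor(x / y) * y + floormod(x, y) == x, regardless of signs.
print((tf.math.floordiv(x, y) * y + m).numpy())  # [ 7 -7]
```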
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Greater", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_greater( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return greater_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + greater, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_greater( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Greater", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + greater, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Greater", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Greater = tf_export("raw_ops.Greater")(_ops.to_raw_op(greater)) +_dispatcher_for_greater = greater._tf_type_based_dispatcher.Dispatch + + +def greater_eager_fallback(x: Annotated[Any, TV_Greater_T], y: Annotated[Any, TV_Greater_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Greater", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Greater", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_GreaterEqual_T = TypeVar("TV_GreaterEqual_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.greater_equal', 'greater_equal') +def greater_equal(x: Annotated[Any, TV_GreaterEqual_T], y: Annotated[Any, TV_GreaterEqual_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x >= y) element-wise. + + *NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Example: + + ```python + x = tf.constant([5, 4, 6, 7]) + y = tf.constant([5, 2, 5, 10]) + tf.math.greater_equal(x, y) ==> [True, True, True, False] + + x = tf.constant([5, 4, 6, 7]) + y = tf.constant([5]) + tf.math.greater_equal(x, y) ==> [True, False, True, True] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + y: A `Tensor`. 
Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GreaterEqual", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_greater_equal( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return greater_equal_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + greater_equal, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_greater_equal( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GreaterEqual", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + greater_equal, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "GreaterEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +GreaterEqual = tf_export("raw_ops.GreaterEqual")(_ops.to_raw_op(greater_equal)) +_dispatcher_for_greater_equal = greater_equal._tf_type_based_dispatcher.Dispatch + + +def greater_equal_eager_fallback(x: Annotated[Any, TV_GreaterEqual_T], y: Annotated[Any, TV_GreaterEqual_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"GreaterEqual", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GreaterEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_HistogramFixedWidth_T = TypeVar("TV_HistogramFixedWidth_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) +TV_HistogramFixedWidth_dtype = TypeVar("TV_HistogramFixedWidth_dtype", _atypes.Int32, _atypes.Int64) + +def _histogram_fixed_width(values: Annotated[Any, TV_HistogramFixedWidth_T], value_range: Annotated[Any, TV_HistogramFixedWidth_T], nbins: Annotated[Any, _atypes.Int32], dtype:TV_HistogramFixedWidth_dtype=_dtypes.int32, name=None) -> Annotated[Any, TV_HistogramFixedWidth_dtype]: + r"""Return histogram of values. + + Given the tensor `values`, this operation returns a rank 1 histogram counting + the number of entries in `values` that fall into every bin. The bins are + equal width and determined by the arguments `value_range` and `nbins`. 
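The broadcasting behaviour described in the `Greater` and `GreaterEqual` docstrings, in one minimal eager-mode sketch (TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

x = tf.constant([5, 4, 6])
y = tf.constant([5])  # broadcasts against x
print(tf.math.greater(x, y).numpy())        # [False False  True]
print(tf.math.greater_equal(x, y).numpy())  # [ True False  True]
```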
+ + ```python + # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + nbins = 5 + value_range = [0.0, 5.0] + new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + + with tf.get_default_session() as sess: + hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + variables.global_variables_initializer().run() + sess.run(hist) => [2, 1, 1, 0, 2] + ``` + + Args: + values: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + Numeric `Tensor`. + value_range: A `Tensor`. Must have the same type as `values`. + Shape [2] `Tensor` of same `dtype` as `values`. + values <= value_range[0] will be mapped to hist[0], + values >= value_range[1] will be mapped to hist[-1]. + nbins: A `Tensor` of type `int32`. + Scalar `int32 Tensor`. Number of histogram bins. + dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "HistogramFixedWidth", name, values, value_range, nbins, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _histogram_fixed_width_eager_fallback( + values, value_range, nbins, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.int32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "HistogramFixedWidth", values=values, value_range=value_range, + nbins=nbins, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "dtype", + _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "HistogramFixedWidth", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +HistogramFixedWidth = tf_export("raw_ops.HistogramFixedWidth")(_ops.to_raw_op(_histogram_fixed_width)) + + +def _histogram_fixed_width_eager_fallback(values: Annotated[Any, TV_HistogramFixedWidth_T], value_range: Annotated[Any, TV_HistogramFixedWidth_T], nbins: Annotated[Any, _atypes.Int32], dtype: TV_HistogramFixedWidth_dtype, name, ctx) -> Annotated[Any, TV_HistogramFixedWidth_dtype]: + if dtype is None: + dtype = _dtypes.int32 + dtype = _execute.make_type(dtype, "dtype") + _attr_T, _inputs_T = _execute.args_to_matching_eager([values, value_range], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + (values, value_range) = _inputs_T + nbins = _ops.convert_to_tensor(nbins, _dtypes.int32) + _inputs_flat = [values, value_range, nbins] + _attrs = ("T", _attr_T, "dtype", dtype) + _result = _execute.execute(b"HistogramFixedWidth", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "HistogramFixedWidth", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Igamma_T = TypeVar("TV_Igamma_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.igamma', v1=['math.igamma', 'igamma']) +@deprecated_endpoints('igamma') +def igamma(a: Annotated[Any, TV_Igamma_T], x: 
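The `HistogramFixedWidth` docstring above still shows a TF1 session-based example; an equivalent eager-mode call (TensorFlow 2.x assumed; not part of the generated file) produces the same counts:

```python
import tensorflow as tf

values = tf.constant([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
hist = tf.histogram_fixed_width(values, value_range=[0.0, 5.0], nbins=5)
print(hist.numpy())  # [2 1 1 0 2]
```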
Annotated[Any, TV_Igamma_T], name=None) -> Annotated[Any, TV_Igamma_T]: + r"""Compute the lower regularized incomplete Gamma function `P(a, x)`. + + The lower regularized incomplete Gamma function is defined as: + + + \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) + + where + + \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) + + is the lower incomplete Gamma function. + + Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + Gamma function. + + Args: + a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + x: A `Tensor`. Must have the same type as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Igamma", name, a, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_igamma( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + return igamma_eager_fallback( + a, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + igamma, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_igamma( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Igamma", a=a, x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + igamma, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Igamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Igamma = tf_export("raw_ops.Igamma")(_ops.to_raw_op(igamma)) +_dispatcher_for_igamma = igamma._tf_type_based_dispatcher.Dispatch + + +def igamma_eager_fallback(a: Annotated[Any, TV_Igamma_T], x: Annotated[Any, TV_Igamma_T], name, ctx) -> Annotated[Any, TV_Igamma_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (a, x) = _inputs_T + _inputs_flat = [a, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Igamma", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Igamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_IgammaGradA_T = TypeVar("TV_IgammaGradA_T", _atypes.Float32, _atypes.Float64) + +def igamma_grad_a(a: Annotated[Any, TV_IgammaGradA_T], x: Annotated[Any, TV_IgammaGradA_T], name=None) -> Annotated[Any, TV_IgammaGradA_T]: + r"""Computes the gradient of `igamma(a, x)` wrt `a`. + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `float64`. + x: A `Tensor`. Must have the same type as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IgammaGradA", name, a, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return igamma_grad_a_eager_fallback( + a, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IgammaGradA", a=a, x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IgammaGradA", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IgammaGradA = tf_export("raw_ops.IgammaGradA")(_ops.to_raw_op(igamma_grad_a)) + + +def igamma_grad_a_eager_fallback(a: Annotated[Any, TV_IgammaGradA_T], x: Annotated[Any, TV_IgammaGradA_T], name, ctx) -> Annotated[Any, TV_IgammaGradA_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], ctx, [_dtypes.float32, _dtypes.float64, ]) + (a, x) = _inputs_T + _inputs_flat = [a, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"IgammaGradA", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IgammaGradA", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Igammac_T = TypeVar("TV_Igammac_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.igammac', v1=['math.igammac', 'igammac']) +@deprecated_endpoints('igammac') +def igammac(a: Annotated[Any, TV_Igammac_T], x: Annotated[Any, TV_Igammac_T], name=None) -> Annotated[Any, TV_Igammac_T]: + r"""Compute the upper regularized incomplete Gamma function `Q(a, x)`. + + The upper regularized incomplete Gamma function is defined as: + + \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) + + where + + \\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\\) + + is the upper incomplete Gamma function. + + Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + Gamma function. + + Args: + a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + x: A `Tensor`. Must have the same type as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Igammac", name, a, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_igammac( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + return igammac_eager_fallback( + a, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + igammac, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_igammac( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Igammac", a=a, x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + igammac, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Igammac", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Igammac = tf_export("raw_ops.Igammac")(_ops.to_raw_op(igammac)) +_dispatcher_for_igammac = igammac._tf_type_based_dispatcher.Dispatch + + +def igammac_eager_fallback(a: Annotated[Any, TV_Igammac_T], x: Annotated[Any, TV_Igammac_T], name, ctx) -> Annotated[Any, TV_Igammac_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (a, x) = _inputs_T + _inputs_flat = [a, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Igammac", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Igammac", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Imag_T = TypeVar("TV_Imag_T", _atypes.Complex128, _atypes.Complex64) +TV_Imag_Tout = TypeVar("TV_Imag_Tout", _atypes.Float32, _atypes.Float64) + +def imag(input: Annotated[Any, TV_Imag_T], Tout:TV_Imag_Tout=_dtypes.float32, name=None) -> Annotated[Any, TV_Imag_Tout]: + r"""Returns the imaginary part of a complex number. + + Given a tensor `input` of complex numbers, this operation returns a tensor of + type `float` that is the imaginary part of each element in `input`. All + elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* + is the real part and *b* is the imaginary part returned by this operation. + + For example: + + ``` + # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + tf.imag(input) ==> [4.75, 5.75] + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. + Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Imag", name, input, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return imag_eager_fallback( + input, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
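Note that where the `Igamma` and `Igammac` docstrings say "regularized complete Gamma function", the standard term is the regularized incomplete Gamma function; the two ops are complementary, with `P(a, x) + Q(a, x) = 1`. A minimal eager-mode check (TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

a = tf.constant([0.5, 1.5, 3.0])
x = tf.constant([1.0, 2.0, 4.0])
p = tf.math.igamma(a, x)   # lower regularized incomplete gamma, P(a, x)
q = tf.math.igammac(a, x)  # upper regularized incomplete gamma, Q(a, x)
print((p + q).numpy())     # ~[1. 1. 1.]
```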
+ if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Imag", input=input, Tout=Tout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tout", + _op._get_attr_type("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Imag", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Imag = tf_export("raw_ops.Imag")(_ops.to_raw_op(imag)) + + +def imag_eager_fallback(input: Annotated[Any, TV_Imag_T], Tout: TV_Imag_Tout, name, ctx) -> Annotated[Any, TV_Imag_Tout]: + if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.complex64, _dtypes.complex128, ], _dtypes.complex64) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "Tout", Tout) + _result = _execute.execute(b"Imag", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Imag", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Inv_T = TypeVar("TV_Inv_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +def inv(x: Annotated[Any, TV_Inv_T], name=None) -> Annotated[Any, TV_Inv_T]: + r"""Computes the reciprocal of x element-wise. + + I.e., \\(y = 1 / x\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Inv", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return inv_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Inv", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Inv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Inv = tf_export("raw_ops.Inv")(_ops.to_raw_op(inv)) + + +def inv_eager_fallback(x: Annotated[Any, TV_Inv_T], name, ctx) -> Annotated[Any, TV_Inv_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Inv", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Inv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_InvGrad_T = TypeVar("TV_InvGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def inv_grad(y: Annotated[Any, TV_InvGrad_T], dy: Annotated[Any, TV_InvGrad_T], name=None) -> Annotated[Any, TV_InvGrad_T]: + r"""Computes the gradient for the inverse of `x` wrt its input. + + Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + is the corresponding input gradient. + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InvGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return inv_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InvGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "InvGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +InvGrad = tf_export("raw_ops.InvGrad")(_ops.to_raw_op(inv_grad)) + + +def inv_grad_eager_fallback(y: Annotated[Any, TV_InvGrad_T], dy: Annotated[Any, TV_InvGrad_T], name, ctx) -> Annotated[Any, TV_InvGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"InvGrad", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "InvGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_IsFinite_T = TypeVar("TV_IsFinite_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.is_finite', v1=['math.is_finite', 'debugging.is_finite', 'is_finite']) +@deprecated_endpoints('debugging.is_finite', 'is_finite') +def is_finite(x: Annotated[Any, TV_IsFinite_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns which elements of x are finite. + + @compatibility(numpy) + Equivalent to np.isfinite + @end_compatibility + + Example: + + ```python + x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + tf.math.is_finite(x) ==> [True, True, True, False, False] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsFinite", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_is_finite( + (x, name,), None) + if _result is not NotImplemented: + return _result + return is_finite_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_finite, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_is_finite( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
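`Inv` and `InvGrad` are exposed only through `tf.raw_ops` (per the `tf_export("raw_ops....")` lines above). A minimal eager-mode sketch of the gradient formula from the `InvGrad` docstring (TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

x = tf.constant(4.0)
y = tf.raw_ops.Inv(x=x)  # 0.25, i.e. 1 / x
# InvGrad computes -dy * y * y; with dy = 1 this is d(1/x)/dx = -1/x**2.
print(tf.raw_ops.InvGrad(y=y, dy=tf.constant(1.0)).numpy())  # -0.0625
```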
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsFinite", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_finite, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsFinite", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsFinite = tf_export("raw_ops.IsFinite")(_ops.to_raw_op(is_finite)) +_dispatcher_for_is_finite = is_finite._tf_type_based_dispatcher.Dispatch + + +def is_finite_eager_fallback(x: Annotated[Any, TV_IsFinite_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"IsFinite", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsFinite", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_IsInf_T = TypeVar("TV_IsInf_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.is_inf', v1=['math.is_inf', 'debugging.is_inf', 'is_inf']) +@deprecated_endpoints('debugging.is_inf', 'is_inf') +def is_inf(x: Annotated[Any, TV_IsInf_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns which elements of x are Inf. + + @compatibility(numpy) + Equivalent to np.isinf + @end_compatibility + + Example: + + ```python + x = tf.constant([5.0, np.inf, 6.8, np.inf]) + tf.math.is_inf(x) ==> [False, True, False, True] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsInf", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_is_inf( + (x, name,), None) + if _result is not NotImplemented: + return _result + return is_inf_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_inf, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_is_inf( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsInf", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_inf, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsInf", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsInf = tf_export("raw_ops.IsInf")(_ops.to_raw_op(is_inf)) +_dispatcher_for_is_inf = is_inf._tf_type_based_dispatcher.Dispatch + + +def is_inf_eager_fallback(x: Annotated[Any, TV_IsInf_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"IsInf", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsInf", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_IsNan_T = TypeVar("TV_IsNan_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.is_nan', v1=['math.is_nan', 'debugging.is_nan', 'is_nan']) +@deprecated_endpoints('debugging.is_nan', 'is_nan') +def is_nan(x: Annotated[Any, TV_IsNan_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns which elements of x are NaN. + + @compatibility(numpy) + Equivalent to np.isnan + @end_compatibility + + Example: + + ```python + x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + tf.math.is_nan(x) ==> [False, True, False, True, False] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsNan", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_is_nan( + (x, name,), None) + if _result is not NotImplemented: + return _result + return is_nan_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_nan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_is_nan( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsNan", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + is_nan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsNan = tf_export("raw_ops.IsNan")(_ops.to_raw_op(is_nan)) +_dispatcher_for_is_nan = is_nan._tf_type_based_dispatcher.Dispatch + + +def is_nan_eager_fallback(x: Annotated[Any, TV_IsNan_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"IsNan", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Less_T = TypeVar("TV_Less_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.less', 'less') +def less(x: Annotated[Any, TV_Less_T], y: Annotated[Any, TV_Less_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x < y) element-wise. + + *NOTE*: `math.less` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Example: + + ```python + x = tf.constant([5, 4, 6]) + y = tf.constant([5]) + tf.math.less(x, y) ==> [False, True, False] + + x = tf.constant([5, 4, 6]) + y = tf.constant([5, 6, 7]) + tf.math.less(x, y) ==> [False, True, True] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Less", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_less( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return less_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + less, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_less( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
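The three predicate ops defined above (`IsFinite`, `IsInf`, `IsNan`) partition the special floating-point values. A minimal eager-mode sketch (TensorFlow 2.x assumed; not part of the generated file):

```python
import numpy as np
import tensorflow as tf

x = tf.constant([5.0, np.inf, np.nan])
print(tf.math.is_finite(x).numpy())  # [ True False False]
print(tf.math.is_inf(x).numpy())     # [False  True False]
print(tf.math.is_nan(x).numpy())     # [False False  True]
```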
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Less", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + less, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Less", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Less = tf_export("raw_ops.Less")(_ops.to_raw_op(less)) +_dispatcher_for_less = less._tf_type_based_dispatcher.Dispatch + + +def less_eager_fallback(x: Annotated[Any, TV_Less_T], y: Annotated[Any, TV_Less_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Less", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Less", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_LessEqual_T = TypeVar("TV_LessEqual_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.less_equal', 'less_equal') +def less_equal(x: Annotated[Any, TV_LessEqual_T], y: Annotated[Any, TV_LessEqual_T], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x <= y) element-wise. + + *NOTE*: `math.less_equal` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Example: + + ```python + x = tf.constant([5, 4, 6]) + y = tf.constant([5]) + tf.math.less_equal(x, y) ==> [True, True, False] + + x = tf.constant([5, 4, 6]) + y = tf.constant([5, 6, 6]) + tf.math.less_equal(x, y) ==> [True, True, True] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LessEqual", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_less_equal( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return less_equal_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + less_equal, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_less_equal( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LessEqual", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + less_equal, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LessEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LessEqual = tf_export("raw_ops.LessEqual")(_ops.to_raw_op(less_equal)) +_dispatcher_for_less_equal = less_equal._tf_type_based_dispatcher.Dispatch + + +def less_equal_eager_fallback(x: Annotated[Any, TV_LessEqual_T], y: Annotated[Any, TV_LessEqual_T], name, ctx) -> Annotated[Any, _atypes.Bool]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"LessEqual", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LessEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Lgamma_T = TypeVar("TV_Lgamma_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.lgamma', v1=['math.lgamma', 'lgamma']) +@deprecated_endpoints('lgamma') +def lgamma(x: Annotated[Any, TV_Lgamma_T], name=None) -> Annotated[Any, TV_Lgamma_T]: + r"""Computes the log of the absolute value of `Gamma(x)` element-wise. + + For positive numbers, this function computes log((input - 1)!) for every element in the tensor. + `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` + + Example: + + ```python + x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Lgamma", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_lgamma( + (x, name,), None) + if _result is not NotImplemented: + return _result + return lgamma_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + lgamma, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_lgamma( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Lgamma", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + lgamma, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Lgamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Lgamma = tf_export("raw_ops.Lgamma")(_ops.to_raw_op(lgamma)) +_dispatcher_for_lgamma = lgamma._tf_type_based_dispatcher.Dispatch + + +def lgamma_eager_fallback(x: Annotated[Any, TV_Lgamma_T], name, ctx) -> Annotated[Any, TV_Lgamma_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Lgamma", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Lgamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_LinSpace_T = TypeVar("TV_LinSpace_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_LinSpace_Tidx = TypeVar("TV_LinSpace_Tidx", _atypes.Int32, _atypes.Int64) + +def lin_space(start: Annotated[Any, TV_LinSpace_T], stop: Annotated[Any, TV_LinSpace_T], num: Annotated[Any, TV_LinSpace_Tidx], name=None) -> Annotated[Any, TV_LinSpace_T]: + r"""Generates values in an interval. + + A sequence of `num` evenly-spaced values are generated beginning at `start`. + If `num > 1`, the values in the sequence increase by + `(stop - start) / (num - 1)`, so that the last one is exactly `stop`. + + For example: + + ``` + tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] + ``` + + Args: + start: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + 0-D tensor. First entry in the range. + stop: A `Tensor`. Must have the same type as `start`. + 0-D tensor. Last entry in the range. + num: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 0-D tensor. Number of values to generate. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `start`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LinSpace", name, start, stop, num) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return lin_space_eager_fallback( + start, stop, num, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
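The factorial relationship stated in the `Lgamma` docstring, as a one-line eager-mode check (TensorFlow 2.x assumed; not part of the generated file):

```python
import tensorflow as tf

# For positive integers, lgamma(n) == log((n - 1)!); e.g. lgamma(5) == log(4!) == log(24).
print(tf.math.lgamma(tf.constant(5.0)).numpy())  # ~3.1780539
```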
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LinSpace", start=start, stop=stop, num=num, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LinSpace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LinSpace = tf_export("raw_ops.LinSpace")(_ops.to_raw_op(lin_space)) + + +def lin_space_eager_fallback(start: Annotated[Any, TV_LinSpace_T], stop: Annotated[Any, TV_LinSpace_T], num: Annotated[Any, TV_LinSpace_Tidx], name, ctx) -> Annotated[Any, TV_LinSpace_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([start, stop], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (start, stop) = _inputs_T + _attr_Tidx, (num,) = _execute.args_to_matching_eager([num], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [start, stop, num] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"LinSpace", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LinSpace", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Log_T = TypeVar("TV_Log_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.log', v1=['math.log', 'log']) +@deprecated_endpoints('log') +def log(x: Annotated[Any, TV_Log_T], name=None) -> Annotated[Any, TV_Log_T]: + r"""Computes natural logarithm of x element-wise. + + I.e., \\(y = \log_e x\\). + + Example: + >>> x = tf.constant([0, 0.5, 1, 5]) + >>> tf.math.log(x) + + + See: https://en.wikipedia.org/wiki/Logarithm + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Log", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_log( + (x, name,), None) + if _result is not NotImplemented: + return _result + return log_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + log, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_log( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Log", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + log, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Log", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Log = tf_export("raw_ops.Log")(_ops.to_raw_op(log)) +_dispatcher_for_log = log._tf_type_based_dispatcher.Dispatch + + +def log_eager_fallback(x: Annotated[Any, TV_Log_T], name, ctx) -> Annotated[Any, TV_Log_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Log", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Log", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Log1p_T = TypeVar("TV_Log1p_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.log1p', v1=['math.log1p', 'log1p']) +@deprecated_endpoints('log1p') +def log1p(x: Annotated[Any, TV_Log1p_T], name=None) -> Annotated[Any, TV_Log1p_T]: + r"""Computes natural logarithm of (1 + x) element-wise. + + I.e., \\(y = \log_e (1 + x)\\). + + Example: + >>> x = tf.constant([0, 0.5, 1, 5]) + >>> tf.math.log1p(x) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Log1p", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_log1p( + (x, name,), None) + if _result is not NotImplemented: + return _result + return log1p_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + log1p, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_log1p( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
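Similarly, a minimal sketch for `tf.math.log1p`, with approximate expected values noted in comments; the comparison with `tf.math.log(1 + x)` is an illustration of why the op exists, not text from the generated file:

```python
import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0, 5.0])
print(tf.math.log1p(x))          # approx. [0.0, 0.4055, 0.6931, 1.7918]

# Mathematically equal to log(1 + x), but log1p is more accurate near x == 0.
print(tf.math.log(1.0 + x))      # approx. the same values
```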
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Log1p", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + log1p, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Log1p", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Log1p = tf_export("raw_ops.Log1p")(_ops.to_raw_op(log1p)) +_dispatcher_for_log1p = log1p._tf_type_based_dispatcher.Dispatch + + +def log1p_eager_fallback(x: Annotated[Any, TV_Log1p_T], name, ctx) -> Annotated[Any, TV_Log1p_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Log1p", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Log1p", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.logical_and', 'logical_and') +def logical_and(x: Annotated[Any, _atypes.Bool], y: Annotated[Any, _atypes.Bool], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of x AND y element-wise. + + Logical AND function. + + Requires that `x` and `y` have the same shape or have + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shapes. For example, `x` and `y` can be: + + - Two single elements of type `bool`. + - One `tf.Tensor` of type `bool` and one single `bool`, where the result will + be calculated by applying logical AND with the single element to each + element in the larger Tensor. + - Two `tf.Tensor` objects of type `bool` of the same shape. In this case, + the result will be the element-wise logical AND of the two input tensors. + + You can also use the `&` operator instead. + + Usage: + + >>> a = tf.constant([True]) + >>> b = tf.constant([False]) + >>> tf.math.logical_and(a, b) + + >>> a & b + + + >>> c = tf.constant([True]) + >>> x = tf.constant([False, True, True, False]) + >>> tf.math.logical_and(c, x) + + >>> c & x + + + >>> y = tf.constant([False, False, True, True]) + >>> z = tf.constant([False, True, False, True]) + >>> tf.math.logical_and(y, z) + + >>> y & z + + + This op also supports broadcasting + + >>> tf.logical_and([[True, False]], [[True], [False]]) + + + The reduction version of this elementwise operation is `tf.math.reduce_all`. + + Args: + x: A `tf.Tensor` of type bool. + y: A `tf.Tensor` of type bool. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to. + + Args: + x: A `Tensor` of type `bool`. + y: A `Tensor` of type `bool`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LogicalAnd", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_logical_and( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return logical_and_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_and, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_logical_and( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LogicalAnd", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_and, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "LogicalAnd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LogicalAnd = tf_export("raw_ops.LogicalAnd")(_ops.to_raw_op(logical_and)) +_dispatcher_for_logical_and = logical_and._tf_type_based_dispatcher.Dispatch + + +def logical_and_eager_fallback(x: Annotated[Any, _atypes.Bool], y: Annotated[Any, _atypes.Bool], name, ctx) -> Annotated[Any, _atypes.Bool]: + x = _ops.convert_to_tensor(x, _dtypes.bool) + y = _ops.convert_to_tensor(y, _dtypes.bool) + _inputs_flat = [x, y] + _attrs = None + _result = _execute.execute(b"LogicalAnd", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LogicalAnd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.logical_not', 'logical_not') +def logical_not(x: Annotated[Any, _atypes.Bool], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of `NOT x` element-wise. + + Example: + + >>> tf.math.logical_not(tf.constant([True, False])) + + + Args: + x: A `Tensor` of type `bool`. A `Tensor` of type `bool`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LogicalNot", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_logical_not( + (x, name,), None) + if _result is not NotImplemented: + return _result + return logical_not_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_not, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_logical_not( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LogicalNot", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_not, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "LogicalNot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LogicalNot = tf_export("raw_ops.LogicalNot")(_ops.to_raw_op(logical_not)) +_dispatcher_for_logical_not = logical_not._tf_type_based_dispatcher.Dispatch + + +def logical_not_eager_fallback(x: Annotated[Any, _atypes.Bool], name, ctx) -> Annotated[Any, _atypes.Bool]: + x = _ops.convert_to_tensor(x, _dtypes.bool) + _inputs_flat = [x] + _attrs = None + _result = _execute.execute(b"LogicalNot", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LogicalNot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.logical_or', 'logical_or') +def logical_or(x: Annotated[Any, _atypes.Bool], y: Annotated[Any, _atypes.Bool], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of x OR y element-wise. + + Logical OR function. + + Requires that `x` and `y` have the same shape or have + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shapes. For example, `x` and `y` can be: + + - Two single elements of type `bool`. + - One `tf.Tensor` of type `bool` and one single `bool`, where the result will + be calculated by applying logical OR with the single element to each + element in the larger Tensor. + - Two `tf.Tensor` objects of type `bool` of the same shape. In this case, + the result will be the element-wise logical OR of the two input tensors. + + You can also use the `|` operator instead. + + Usage: + + >>> a = tf.constant([True]) + >>> b = tf.constant([False]) + >>> tf.math.logical_or(a, b) + + >>> a | b + + + >>> c = tf.constant([False]) + >>> x = tf.constant([False, True, True, False]) + >>> tf.math.logical_or(c, x) + + >>> c | x + + + >>> y = tf.constant([False, False, True, True]) + >>> z = tf.constant([False, True, False, True]) + >>> tf.math.logical_or(y, z) + + >>> y | z + + + This op also supports broadcasting + + >>> tf.logical_or([[True, False]], [[True], [False]]) + + + The reduction version of this elementwise operation is `tf.math.reduce_any`. + + Args: + x: A `tf.Tensor` of type bool. + y: A `tf.Tensor` of type bool. + name: A name for the operation (optional). + + Returns: + A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to. + + Args: + x: A `Tensor` of type `bool`. + y: A `Tensor` of type `bool`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LogicalOr", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_logical_or( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return logical_or_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_or, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_logical_or( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LogicalOr", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + logical_or, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "LogicalOr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LogicalOr = tf_export("raw_ops.LogicalOr")(_ops.to_raw_op(logical_or)) +_dispatcher_for_logical_or = logical_or._tf_type_based_dispatcher.Dispatch + + +def logical_or_eager_fallback(x: Annotated[Any, _atypes.Bool], y: Annotated[Any, _atypes.Bool], name, ctx) -> Annotated[Any, _atypes.Bool]: + x = _ops.convert_to_tensor(x, _dtypes.bool) + y = _ops.convert_to_tensor(y, _dtypes.bool) + _inputs_flat = [x, y] + _attrs = None + _result = _execute.execute(b"LogicalOr", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LogicalOr", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_MatMul_T = TypeVar("TV_MatMul_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def mat_mul(a: Annotated[Any, TV_MatMul_T], b: Annotated[Any, TV_MatMul_T], transpose_a:bool=False, transpose_b:bool=False, grad_a:bool=False, grad_b:bool=False, name=None) -> Annotated[Any, TV_MatMul_T]: + r"""Multiply the matrix "a" by the matrix "b". + + The inputs must be two-dimensional matrices and the inner dimension of + "a" (after being transposed if transpose_a is true) must match the + outer dimension of "b" (after being transposed if transposed_b is + true). + + *Note*: The default kernel implementation for MatMul on GPUs uses + cublas. + + Args: + a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`. + b: A `Tensor`. Must have the same type as `a`. + transpose_a: An optional `bool`. Defaults to `False`. + If true, "a" is transposed before multiplication. + transpose_b: An optional `bool`. Defaults to `False`. + If true, "b" is transposed before multiplication. + grad_a: An optional `bool`. Defaults to `False`. + grad_b: An optional `bool`. Defaults to `False`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MatMul", name, a, b, "transpose_a", transpose_a, "transpose_b", + transpose_b, "grad_a", grad_a, "grad_b", grad_b) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return mat_mul_eager_fallback( + a, b, transpose_a=transpose_a, transpose_b=transpose_b, + grad_a=grad_a, grad_b=grad_b, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if grad_a is None: + grad_a = False + grad_a = _execute.make_bool(grad_a, "grad_a") + if grad_b is None: + grad_b = False + grad_b = _execute.make_bool(grad_b, "grad_b") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b, + grad_a=grad_a, grad_b=grad_b, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", + _op._get_attr_bool("transpose_b"), "T", _op._get_attr_type("T"), + "grad_a", _op._get_attr_bool("grad_a"), "grad_b", + _op._get_attr_bool("grad_b")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MatMul = tf_export("raw_ops.MatMul")(_ops.to_raw_op(mat_mul)) + + +def mat_mul_eager_fallback(a: Annotated[Any, TV_MatMul_T], b: Annotated[Any, TV_MatMul_T], transpose_a: bool, transpose_b: bool, grad_a: bool, grad_b: bool, name, ctx) -> Annotated[Any, TV_MatMul_T]: + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if grad_a is None: + grad_a = False + grad_a = _execute.make_bool(grad_a, "grad_a") + if grad_b is None: + grad_b = False + grad_b = _execute.make_bool(grad_b, "grad_b") + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, _dtypes.complex64, _dtypes.complex128, ]) + (a, b) = _inputs_T + _inputs_flat = [a, b] + _attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "T", + _attr_T, "grad_a", grad_a, "grad_b", grad_b) + _result = _execute.execute(b"MatMul", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Max_T = TypeVar("TV_Max_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Max_Tidx = TypeVar("TV_Max_Tidx", _atypes.Int32, _atypes.Int64) + +def _max(input: Annotated[Any, TV_Max_T], 
axis: Annotated[Any, TV_Max_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_Max_T]: + r"""Computes the maximum of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Max", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _max_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Max", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Max", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Max = tf_export("raw_ops.Max")(_ops.to_raw_op(_max)) + + +def _max_eager_fallback(input: Annotated[Any, TV_Max_T], axis: Annotated[Any, TV_Max_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_Max_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Max", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Max", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Maximum_T = TypeVar("TV_Maximum_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list 
+@_dispatch.add_type_based_api_dispatcher +@tf_export('math.maximum', 'maximum') +def maximum(x: Annotated[Any, TV_Maximum_T], y: Annotated[Any, TV_Maximum_T], name=None) -> Annotated[Any, TV_Maximum_T]: + r"""Returns the max of x and y (i.e. x > y ? x : y) element-wise. + + Example: + + >>> x = tf.constant([0., 0., 0., 0.]) + >>> y = tf.constant([-2., 0., 2., 5.]) + >>> tf.math.maximum(x, y) + + + Note that `maximum` supports [broadcast semantics](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) for `x` and `y`. + + >>> x = tf.constant([-5., 0., 0., 0.]) + >>> y = tf.constant([-3.]) + >>> tf.math.maximum(x, y) + + + The reduction version of this elementwise operation is `tf.math.reduce_max` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Maximum", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_maximum( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return maximum_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + maximum, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_maximum( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
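A minimal sketch of the `tf.math.maximum` behaviour documented above, including the broadcasting case; outputs in comments are the expected float32 results, not captured output:

```python
import tensorflow as tf

x = tf.constant([0.0, 0.0, 0.0, 0.0])
y = tf.constant([-2.0, 0.0, 2.0, 5.0])
print(tf.math.maximum(x, y))       # [0. 0. 2. 5.]

# Broadcasting: the single-element y is compared against every element of x.
print(tf.math.maximum(tf.constant([-5.0, 0.0, 3.0]),
                      tf.constant([-3.0])))            # [-3.  0.  3.]
```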
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Maximum", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + maximum, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Maximum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Maximum = tf_export("raw_ops.Maximum")(_ops.to_raw_op(maximum)) +_dispatcher_for_maximum = maximum._tf_type_based_dispatcher.Dispatch + + +def maximum_eager_fallback(x: Annotated[Any, TV_Maximum_T], y: Annotated[Any, TV_Maximum_T], name, ctx) -> Annotated[Any, TV_Maximum_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.uint8, _dtypes.int16, _dtypes.uint16, _dtypes.int32, _dtypes.uint32, _dtypes.int64, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Maximum", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Maximum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Mean_T = TypeVar("TV_Mean_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Mean_Tidx = TypeVar("TV_Mean_Tidx", _atypes.Int32, _atypes.Int64) + +def mean(input: Annotated[Any, TV_Mean_T], axis: Annotated[Any, TV_Mean_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_Mean_T]: + r"""Computes the mean of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Mean", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return mean_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
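For the reduction kernels in this range (`Max`, `Mean`), a hedged sketch using the public reduction APIs, which are the usual entry points sitting on top of these generated wrappers; the example tensor and the commented results are illustrative assumptions:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])

print(tf.reduce_max(x, axis=0))                  # [3. 4.]
print(tf.reduce_mean(x, axis=1, keepdims=True))  # [[1.5]
                                                 #  [3.5]]
```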
+ if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Mean", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Mean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Mean = tf_export("raw_ops.Mean")(_ops.to_raw_op(mean)) + + +def mean_eager_fallback(input: Annotated[Any, TV_Mean_T], axis: Annotated[Any, TV_Mean_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_Mean_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Mean", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Mean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Min_T = TypeVar("TV_Min_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Min_Tidx = TypeVar("TV_Min_Tidx", _atypes.Int32, _atypes.Int64) + +def _min(input: Annotated[Any, TV_Min_T], axis: Annotated[Any, TV_Min_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_Min_T]: + r"""Computes the minimum of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Min", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _min_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Min", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Min", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Min = tf_export("raw_ops.Min")(_ops.to_raw_op(_min)) + + +def _min_eager_fallback(input: Annotated[Any, TV_Min_T], axis: Annotated[Any, TV_Min_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_Min_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Min", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Min", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Minimum_T = TypeVar("TV_Minimum_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.minimum', 'minimum') +def minimum(x: Annotated[Any, TV_Minimum_T], y: Annotated[Any, TV_Minimum_T], name=None) -> Annotated[Any, TV_Minimum_T]: + r"""Returns the min of x and y (i.e. x < y ? x : y) element-wise. + + Both inputs are number-type tensors (except complex). `minimum` expects that + both tensors have the same `dtype`. + + Examples: + + >>> x = tf.constant([0., 0., 0., 0.]) + >>> y = tf.constant([-5., -2., 0., 3.]) + >>> tf.math.minimum(x, y) + + + Note that `minimum` supports [broadcast semantics](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) for `x` and `y`. + + >>> x = tf.constant([-5., 0., 0., 0.]) + >>> y = tf.constant([-3.]) + >>> tf.math.minimum(x, y) + + + The reduction version of this elementwise operation is `tf.math.reduce_min` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`. + y: A `Tensor`. 
Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Minimum", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_minimum( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return minimum_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + minimum, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_minimum( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Minimum", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + minimum, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Minimum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Minimum = tf_export("raw_ops.Minimum")(_ops.to_raw_op(minimum)) +_dispatcher_for_minimum = minimum._tf_type_based_dispatcher.Dispatch + + +def minimum_eager_fallback(x: Annotated[Any, TV_Minimum_T], y: Annotated[Any, TV_Minimum_T], name, ctx) -> Annotated[Any, TV_Minimum_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.uint8, _dtypes.int16, _dtypes.uint16, _dtypes.int32, _dtypes.uint32, _dtypes.int64, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Minimum", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Minimum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Mod_T = TypeVar("TV_Mod_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def mod(x: Annotated[Any, TV_Mod_T], y: Annotated[Any, TV_Mod_T], name=None) -> Annotated[Any, TV_Mod_T]: + r"""Returns element-wise remainder of division. This emulates C semantics in that + + the result here is consistent with a truncating divide. E.g. + `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + + *NOTE*: `Mod` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `half`, `half`, `bfloat16`, `float32`, `float64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Mod", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return mod_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Mod", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Mod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Mod = tf_export("raw_ops.Mod")(_ops.to_raw_op(mod)) + + +def mod_eager_fallback(x: Annotated[Any, TV_Mod_T], y: Annotated[Any, TV_Mod_T], name, ctx) -> Annotated[Any, TV_Mod_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.half, _dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Mod", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Mod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Mul_T = TypeVar("TV_Mul_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def mul(x: Annotated[Any, TV_Mul_T], y: Annotated[Any, TV_Mul_T], name=None) -> Annotated[Any, TV_Mul_T]: + r"""Returns x * y element-wise. + + *NOTE*: `Multiply` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Mul", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return mul_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Mul", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Mul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Mul = tf_export("raw_ops.Mul")(_ops.to_raw_op(mul)) + + +def mul_eager_fallback(x: Annotated[Any, TV_Mul_T], y: Annotated[Any, TV_Mul_T], name, ctx) -> Annotated[Any, TV_Mul_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.uint32, _dtypes.uint64, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Mul", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Mul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_MulNoNan_T = TypeVar("TV_MulNoNan_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def mul_no_nan(x: Annotated[Any, TV_MulNoNan_T], y: Annotated[Any, TV_MulNoNan_T], name=None) -> Annotated[Any, TV_MulNoNan_T]: + r"""Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + + *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MulNoNan", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return mul_no_nan_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
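A minimal sketch contrasting `MulNoNan` (public name `tf.math.multiply_no_nan`) with plain multiplication when `y` is zero and `x` is non-finite; the input values are illustrative and the commented outputs are the expected eager results:

```python
import tensorflow as tf

x = tf.constant([float("inf"), float("nan"), 2.0])
y = tf.constant([0.0, 0.0, 3.0])

print(tf.math.multiply_no_nan(x, y))   # [0. 0. 6.]  zero wherever y is zero
print(x * y)                           # [nan nan 6.] for comparison
```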
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MulNoNan", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MulNoNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MulNoNan = tf_export("raw_ops.MulNoNan")(_ops.to_raw_op(mul_no_nan)) + + +def mul_no_nan_eager_fallback(x: Annotated[Any, TV_MulNoNan_T], y: Annotated[Any, TV_MulNoNan_T], name, ctx) -> Annotated[Any, TV_MulNoNan_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"MulNoNan", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MulNoNan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Ndtri_T = TypeVar("TV_Ndtri_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def ndtri(x: Annotated[Any, TV_Ndtri_T], name=None) -> Annotated[Any, TV_Ndtri_T]: + r"""TODO: add doc. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Ndtri", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ndtri_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Ndtri", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Ndtri", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Ndtri = tf_export("raw_ops.Ndtri")(_ops.to_raw_op(ndtri)) + + +def ndtri_eager_fallback(x: Annotated[Any, TV_Ndtri_T], name, ctx) -> Annotated[Any, TV_Ndtri_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Ndtri", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Ndtri", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Neg_T = TypeVar("TV_Neg_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.negative', 'negative') +def neg(x: Annotated[Any, TV_Neg_T], name=None) -> Annotated[Any, TV_Neg_T]: + r"""Computes numerical negative value element-wise. + + I.e., \\(y = -x\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Neg", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_neg( + (x, name,), None) + if _result is not NotImplemented: + return _result + return neg_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + neg, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_neg( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Neg", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + neg, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Neg", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Neg = tf_export("raw_ops.Neg")(_ops.to_raw_op(neg)) +_dispatcher_for_neg = neg._tf_type_based_dispatcher.Dispatch + + +def neg_eager_fallback(x: Annotated[Any, TV_Neg_T], name, ctx) -> Annotated[Any, TV_Neg_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Neg", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Neg", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_NextAfter_T = TypeVar("TV_NextAfter_T", _atypes.Float32, _atypes.Float64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.nextafter') +def next_after(x1: Annotated[Any, TV_NextAfter_T], x2: Annotated[Any, TV_NextAfter_T], name=None) -> Annotated[Any, TV_NextAfter_T]: + r"""Returns the next representable value of `x1` in the direction of `x2`, element-wise. + + This operation returns the same result as the C++ std::nextafter function. + + It can also return a subnormal number. + + @compatibility(cpp) + Equivalent to C++ std::nextafter function. + @end_compatibility + + Args: + x1: A `Tensor`. Must be one of the following types: `float64`, `float32`. + x2: A `Tensor`. Must have the same type as `x1`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x1`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "NextAfter", name, x1, x2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_next_after( + (x1, x2, name,), None) + if _result is not NotImplemented: + return _result + return next_after_eager_fallback( + x1, x2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + next_after, (), dict(x1=x1, x2=x2, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_next_after( + (x1, x2, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "NextAfter", x1=x1, x2=x2, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + next_after, (), dict(x1=x1, x2=x2, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "NextAfter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +NextAfter = tf_export("raw_ops.NextAfter")(_ops.to_raw_op(next_after)) +_dispatcher_for_next_after = next_after._tf_type_based_dispatcher.Dispatch + + +def next_after_eager_fallback(x1: Annotated[Any, TV_NextAfter_T], x2: Annotated[Any, TV_NextAfter_T], name, ctx) -> Annotated[Any, TV_NextAfter_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x1, x2], ctx, [_dtypes.float64, _dtypes.float32, ], _dtypes.float32) + (x1, x2) = _inputs_T + _inputs_flat = [x1, x2] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"NextAfter", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "NextAfter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_NotEqual_T = TypeVar("TV_NotEqual_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def not_equal(x: Annotated[Any, TV_NotEqual_T], y: Annotated[Any, TV_NotEqual_T], incompatible_shape_error:bool=True, name=None) -> Annotated[Any, _atypes.Bool]: + r"""Returns the truth value of (x != y) element-wise. + + *NOTE*: `NotEqual` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. + y: A `Tensor`. Must have the same type as `x`. + incompatible_shape_error: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "NotEqual", name, x, y, "incompatible_shape_error", + incompatible_shape_error) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return not_equal_eager_fallback( + x, y, incompatible_shape_error=incompatible_shape_error, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if incompatible_shape_error is None: + incompatible_shape_error = True + incompatible_shape_error = _execute.make_bool(incompatible_shape_error, "incompatible_shape_error") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "NotEqual", x=x, y=y, + incompatible_shape_error=incompatible_shape_error, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "incompatible_shape_error", + _op._get_attr_bool("incompatible_shape_error")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "NotEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +NotEqual = tf_export("raw_ops.NotEqual")(_ops.to_raw_op(not_equal)) + + +def not_equal_eager_fallback(x: Annotated[Any, TV_NotEqual_T], y: Annotated[Any, TV_NotEqual_T], incompatible_shape_error: bool, name, ctx) -> Annotated[Any, _atypes.Bool]: + if incompatible_shape_error is None: + incompatible_shape_error = True + incompatible_shape_error = _execute.make_bool(incompatible_shape_error, "incompatible_shape_error") + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, []) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T, "incompatible_shape_error", + incompatible_shape_error) + _result = _execute.execute(b"NotEqual", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "NotEqual", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Polygamma_T = TypeVar("TV_Polygamma_T", _atypes.Float32, _atypes.Float64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.polygamma', v1=['math.polygamma', 'polygamma']) +@deprecated_endpoints('polygamma') +def polygamma(a: Annotated[Any, TV_Polygamma_T], x: Annotated[Any, TV_Polygamma_T], name=None) -> Annotated[Any, TV_Polygamma_T]: + r"""Compute the polygamma function \\(\psi^{(n)}(x)\\). + + The polygamma function is defined as: + + + \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) + + where \\(\psi(x)\\) is the digamma function. + The polygamma function is defined only for non-negative integer orders \\a\\. + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `float64`. + x: A `Tensor`. Must have the same type as `a`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `a`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Polygamma", name, a, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_polygamma( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + return polygamma_eager_fallback( + a, x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + polygamma, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_polygamma( + (a, x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Polygamma", a=a, x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + polygamma, (), dict(a=a, x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Polygamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Polygamma = tf_export("raw_ops.Polygamma")(_ops.to_raw_op(polygamma)) +_dispatcher_for_polygamma = polygamma._tf_type_based_dispatcher.Dispatch + + +def polygamma_eager_fallback(a: Annotated[Any, TV_Polygamma_T], x: Annotated[Any, TV_Polygamma_T], name, ctx) -> Annotated[Any, TV_Polygamma_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], ctx, [_dtypes.float32, _dtypes.float64, ]) + (a, x) = _inputs_T + _inputs_flat = [a, x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Polygamma", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Polygamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Pow_T = TypeVar("TV_Pow_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +def _pow(x: Annotated[Any, TV_Pow_T], y: Annotated[Any, TV_Pow_T], name=None) -> Annotated[Any, TV_Pow_T]: + r"""Computes the power of one value to another. + + Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for + corresponding elements in `x` and `y`. For example: + + ``` + # tensor 'x' is [[2, 2]], [3, 3]] + # tensor 'y' is [[8, 16], [2, 3]] + tf.pow(x, y) ==> [[256, 65536], [9, 27]] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Pow", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _pow_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Pow", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Pow", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Pow = tf_export("raw_ops.Pow")(_ops.to_raw_op(_pow)) + + +def _pow_eager_fallback(x: Annotated[Any, TV_Pow_T], y: Annotated[Any, TV_Pow_T], name, ctx) -> Annotated[Any, TV_Pow_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.half, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Pow", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Pow", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Prod_T = TypeVar("TV_Prod_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Prod_Tidx = TypeVar("TV_Prod_Tidx", _atypes.Int32, _atypes.Int64) + +def prod(input: Annotated[Any, TV_Prod_T], axis: Annotated[Any, TV_Prod_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_Prod_T]: + r"""Computes the product of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. + keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Prod", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return prod_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Prod", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Prod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Prod = tf_export("raw_ops.Prod")(_ops.to_raw_op(prod)) + + +def prod_eager_fallback(input: Annotated[Any, TV_Prod_T], axis: Annotated[Any, TV_Prod_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_Prod_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Prod", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Prod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_QuantizeDownAndShrinkRangeOutput = collections.namedtuple( + "QuantizeDownAndShrinkRange", + ["output", "output_min", "output_max"]) + + +TV_QuantizeDownAndShrinkRange_Tinput = TypeVar("TV_QuantizeDownAndShrinkRange_Tinput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizeDownAndShrinkRange_out_type = TypeVar("TV_QuantizeDownAndShrinkRange_out_type", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def quantize_down_and_shrink_range(input: Annotated[Any, TV_QuantizeDownAndShrinkRange_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], out_type: TV_QuantizeDownAndShrinkRange_out_type, name=None): + r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the + + actual distribution of the values to maximize the usage of the lower bit depth + and adjusting the output min and max ranges accordingly. + + [input_min, input_max] are scalar floats that specify the range for the float + interpretation of the 'input' data. 
For example, if input_min is -1.0f and + input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + + This operator tries to squeeze as much precision as possible into an output with + a lower bit depth by calculating the actual min and max values found in the + data. For example, maybe that quint16 input has no values lower than 16,384 and + none higher than 49,152. That means only half the range is actually needed, all + the float interpretations are between -0.5f and 0.5f, so if we want to compress + the data into a quint8 output, we can use that range rather than the theoretical + -1.0f to 1.0f that is suggested by the input min and max. + + In practice, this is most useful for taking output from operations like + QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + may have large potential output ranges, but in practice have a distribution of + input values that only uses a small fraction of the possible range. By feeding + that output into this operator, we can reduce it from 32 bits down to 8 with + minimal loss of accuracy. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + input_min: A `Tensor` of type `float32`. + The float value that the minimum quantized input value represents. + input_max: A `Tensor` of type `float32`. + The float value that the maximum quantized input value represents. + out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. + The type of the output. Should be a lower bit depth than Tinput. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, output_min, output_max). + + output: A `Tensor` of type `out_type`. + output_min: A `Tensor` of type `float32`. + output_max: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "QuantizeDownAndShrinkRange", name, input, input_min, input_max, + "out_type", out_type) + _result = _QuantizeDownAndShrinkRangeOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return quantize_down_and_shrink_range_eager_fallback( + input, input_min, input_max, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
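+ # Graph-mode path (reached when eager execution is disabled or the fast
+ # path falls back): `out_type` is validated as a DType attr, the op node
+ # is created through _op_def_library below, and the three outputs are
+ # repacked into the _QuantizeDownAndShrinkRangeOutput namedtuple.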
+ out_type = _execute.make_type(out_type, "out_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "QuantizeDownAndShrinkRange", input=input, input_min=input_min, + input_max=input_max, out_type=out_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", + _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result) + _result = _QuantizeDownAndShrinkRangeOutput._make(_result) + return _result + +QuantizeDownAndShrinkRange = tf_export("raw_ops.QuantizeDownAndShrinkRange")(_ops.to_raw_op(quantize_down_and_shrink_range)) + + +def quantize_down_and_shrink_range_eager_fallback(input: Annotated[Any, TV_QuantizeDownAndShrinkRange_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], out_type: TV_QuantizeDownAndShrinkRange_out_type, name, ctx): + out_type = _execute.make_type(out_type, "out_type") + _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) + input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) + _inputs_flat = [input, input_min, input_max] + _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) + _result = _execute.execute(b"QuantizeDownAndShrinkRange", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result) + _result = _QuantizeDownAndShrinkRangeOutput._make(_result) + return _result + +_QuantizedAddOutput = collections.namedtuple( + "QuantizedAdd", + ["z", "min_z", "max_z"]) + + +TV_QuantizedAdd_T1 = TypeVar("TV_QuantizedAdd_T1", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedAdd_T2 = TypeVar("TV_QuantizedAdd_T2", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedAdd_Toutput = TypeVar("TV_QuantizedAdd_Toutput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def quantized_add(x: Annotated[Any, TV_QuantizedAdd_T1], y: Annotated[Any, TV_QuantizedAdd_T2], min_x: Annotated[Any, _atypes.Float32], max_x: Annotated[Any, _atypes.Float32], min_y: Annotated[Any, _atypes.Float32], max_y: Annotated[Any, _atypes.Float32], Toutput:TV_QuantizedAdd_Toutput=_dtypes.qint32, name=None): + r"""Returns x + y element-wise, working on quantized buffers. + + Args: + x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + min_x: A `Tensor` of type `float32`. + The float value that the lowest quantized `x` value represents. + max_x: A `Tensor` of type `float32`. + The float value that the highest quantized `x` value represents. + min_y: A `Tensor` of type `float32`. + The float value that the lowest quantized `y` value represents. + max_y: A `Tensor` of type `float32`. + The float value that the highest quantized `y` value represents. + Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (z, min_z, max_z). + + z: A `Tensor` of type `Toutput`. 
+ min_z: A `Tensor` of type `float32`. + max_z: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "QuantizedAdd", name, x, y, min_x, max_x, min_y, max_y, + "Toutput", Toutput) + _result = _QuantizedAddOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return quantized_add_eager_fallback( + x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "QuantizedAdd", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y, + max_y=max_y, Toutput=Toutput, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), + "Toutput", _op._get_attr_type("Toutput")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "QuantizedAdd", _inputs_flat, _attrs, _result) + _result = _QuantizedAddOutput._make(_result) + return _result + +QuantizedAdd = tf_export("raw_ops.QuantizedAdd")(_ops.to_raw_op(quantized_add)) + + +def quantized_add_eager_fallback(x: Annotated[Any, TV_QuantizedAdd_T1], y: Annotated[Any, TV_QuantizedAdd_T2], min_x: Annotated[Any, _atypes.Float32], max_x: Annotated[Any, _atypes.Float32], min_y: Annotated[Any, _atypes.Float32], max_y: Annotated[Any, _atypes.Float32], Toutput: TV_QuantizedAdd_Toutput, name, ctx): + if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + _attr_T1, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + _attr_T2, (y,) = _execute.args_to_matching_eager([y], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + min_x = _ops.convert_to_tensor(min_x, _dtypes.float32) + max_x = _ops.convert_to_tensor(max_x, _dtypes.float32) + min_y = _ops.convert_to_tensor(min_y, _dtypes.float32) + max_y = _ops.convert_to_tensor(max_y, _dtypes.float32) + _inputs_flat = [x, y, min_x, max_x, min_y, max_y] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput) + _result = _execute.execute(b"QuantizedAdd", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "QuantizedAdd", _inputs_flat, _attrs, _result) + _result = _QuantizedAddOutput._make(_result) + return _result + +_QuantizedMatMulOutput = collections.namedtuple( + "QuantizedMatMul", + ["out", "min_out", "max_out"]) + + +TV_QuantizedMatMul_T1 = TypeVar("TV_QuantizedMatMul_T1", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedMatMul_T2 = TypeVar("TV_QuantizedMatMul_T2", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedMatMul_Toutput = TypeVar("TV_QuantizedMatMul_Toutput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedMatMul_Tactivation = TypeVar("TV_QuantizedMatMul_Tactivation", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def quantized_mat_mul(a: Annotated[Any, TV_QuantizedMatMul_T1], 
b: Annotated[Any, TV_QuantizedMatMul_T2], min_a: Annotated[Any, _atypes.Float32], max_a: Annotated[Any, _atypes.Float32], min_b: Annotated[Any, _atypes.Float32], max_b: Annotated[Any, _atypes.Float32], Toutput:TV_QuantizedMatMul_Toutput=_dtypes.qint32, transpose_a:bool=False, transpose_b:bool=False, Tactivation:TV_QuantizedMatMul_Tactivation=_dtypes.quint8, name=None): + r"""Perform a quantized matrix multiplication of `a` by the matrix `b`. + + The inputs must be two-dimensional matrices and the inner dimension of + `a` (after being transposed if `transpose_a` is non-zero) must match the + outer dimension of `b` (after being transposed if `transposed_b` is + non-zero). + + Args: + a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + Must be a two-dimensional tensor. + b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + Must be a two-dimensional tensor. + min_a: A `Tensor` of type `float32`. + The float value that the lowest quantized `a` value represents. + max_a: A `Tensor` of type `float32`. + The float value that the highest quantized `a` value represents. + min_b: A `Tensor` of type `float32`. + The float value that the lowest quantized `b` value represents. + max_b: A `Tensor` of type `float32`. + The float value that the highest quantized `b` value represents. + Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. + transpose_a: An optional `bool`. Defaults to `False`. + If true, `a` is transposed before multiplication. + transpose_b: An optional `bool`. Defaults to `False`. + If true, `b` is transposed before multiplication. + Tactivation: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. + The type of output produced by activation function + following this operation. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (out, min_out, max_out). + + out: A `Tensor` of type `Toutput`. + min_out: A `Tensor` of type `float32`. + max_out: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "QuantizedMatMul", name, a, b, min_a, max_a, min_b, max_b, + "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", + transpose_b, "Tactivation", Tactivation) + _result = _QuantizedMatMulOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return quantized_mat_mul_eager_fallback( + a, b, min_a, max_a, min_b, max_b, Toutput=Toutput, + transpose_a=transpose_a, transpose_b=transpose_b, + Tactivation=Tactivation, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
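+ # Graph-mode path: the optional attrs are normalized to their defaults
+ # (Toutput=qint32, transpose_a=transpose_b=False, Tactivation=quint8)
+ # before the QuantizedMatMul node is added to the graph below.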
+ if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if Tactivation is None: + Tactivation = _dtypes.quint8 + Tactivation = _execute.make_type(Tactivation, "Tactivation") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "QuantizedMatMul", a=a, b=b, min_a=min_a, max_a=max_a, min_b=min_b, + max_b=max_b, Toutput=Toutput, + transpose_a=transpose_a, transpose_b=transpose_b, + Tactivation=Tactivation, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), + "Toutput", _op._get_attr_type("Toutput"), "transpose_a", + _op._get_attr_bool("transpose_a"), "transpose_b", + _op._get_attr_bool("transpose_b"), "Tactivation", + _op._get_attr_type("Tactivation")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "QuantizedMatMul", _inputs_flat, _attrs, _result) + _result = _QuantizedMatMulOutput._make(_result) + return _result + +QuantizedMatMul = tf_export("raw_ops.QuantizedMatMul")(_ops.to_raw_op(quantized_mat_mul)) + + +def quantized_mat_mul_eager_fallback(a: Annotated[Any, TV_QuantizedMatMul_T1], b: Annotated[Any, TV_QuantizedMatMul_T2], min_a: Annotated[Any, _atypes.Float32], max_a: Annotated[Any, _atypes.Float32], min_b: Annotated[Any, _atypes.Float32], max_b: Annotated[Any, _atypes.Float32], Toutput: TV_QuantizedMatMul_Toutput, transpose_a: bool, transpose_b: bool, Tactivation: TV_QuantizedMatMul_Tactivation, name, ctx): + if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if Tactivation is None: + Tactivation = _dtypes.quint8 + Tactivation = _execute.make_type(Tactivation, "Tactivation") + _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) + max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) + min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) + max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) + _inputs_flat = [a, b, min_a, max_a, min_b, max_b] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a", + transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation) + _result = _execute.execute(b"QuantizedMatMul", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "QuantizedMatMul", _inputs_flat, _attrs, _result) + _result = _QuantizedMatMulOutput._make(_result) + return _result + +_QuantizedMulOutput = collections.namedtuple( + "QuantizedMul", + ["z", "min_z", "max_z"]) + + +TV_QuantizedMul_T1 = TypeVar("TV_QuantizedMul_T1", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_QuantizedMul_T2 = TypeVar("TV_QuantizedMul_T2", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) 
+TV_QuantizedMul_Toutput = TypeVar("TV_QuantizedMul_Toutput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def quantized_mul(x: Annotated[Any, TV_QuantizedMul_T1], y: Annotated[Any, TV_QuantizedMul_T2], min_x: Annotated[Any, _atypes.Float32], max_x: Annotated[Any, _atypes.Float32], min_y: Annotated[Any, _atypes.Float32], max_y: Annotated[Any, _atypes.Float32], Toutput:TV_QuantizedMul_Toutput=_dtypes.qint32, name=None): + r"""Returns x * y element-wise, working on quantized buffers. + + Args: + x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + min_x: A `Tensor` of type `float32`. + The float value that the lowest quantized `x` value represents. + max_x: A `Tensor` of type `float32`. + The float value that the highest quantized `x` value represents. + min_y: A `Tensor` of type `float32`. + The float value that the lowest quantized `y` value represents. + max_y: A `Tensor` of type `float32`. + The float value that the highest quantized `y` value represents. + Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (z, min_z, max_z). + + z: A `Tensor` of type `Toutput`. + min_z: A `Tensor` of type `float32`. + max_z: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "QuantizedMul", name, x, y, min_x, max_x, min_y, max_y, + "Toutput", Toutput) + _result = _QuantizedMulOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return quantized_mul_eager_fallback( + x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
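+ # Graph-mode path for QuantizedMul: as with QuantizedAdd above, only the
+ # Toutput attr (default qint32) needs normalizing before the node is
+ # applied below.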
+ if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "QuantizedMul", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y, + max_y=max_y, Toutput=Toutput, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), + "Toutput", _op._get_attr_type("Toutput")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "QuantizedMul", _inputs_flat, _attrs, _result) + _result = _QuantizedMulOutput._make(_result) + return _result + +QuantizedMul = tf_export("raw_ops.QuantizedMul")(_ops.to_raw_op(quantized_mul)) + + +def quantized_mul_eager_fallback(x: Annotated[Any, TV_QuantizedMul_T1], y: Annotated[Any, TV_QuantizedMul_T2], min_x: Annotated[Any, _atypes.Float32], max_x: Annotated[Any, _atypes.Float32], min_y: Annotated[Any, _atypes.Float32], max_y: Annotated[Any, _atypes.Float32], Toutput: TV_QuantizedMul_Toutput, name, ctx): + if Toutput is None: + Toutput = _dtypes.qint32 + Toutput = _execute.make_type(Toutput, "Toutput") + _attr_T1, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + _attr_T2, (y,) = _execute.args_to_matching_eager([y], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + min_x = _ops.convert_to_tensor(min_x, _dtypes.float32) + max_x = _ops.convert_to_tensor(max_x, _dtypes.float32) + min_y = _ops.convert_to_tensor(min_y, _dtypes.float32) + max_y = _ops.convert_to_tensor(max_y, _dtypes.float32) + _inputs_flat = [x, y, min_x, max_x, min_y, max_y] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput) + _result = _execute.execute(b"QuantizedMul", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "QuantizedMul", _inputs_flat, _attrs, _result) + _result = _QuantizedMulOutput._make(_result) + return _result + + +TV_RaggedBincount_Tidx = TypeVar("TV_RaggedBincount_Tidx", _atypes.Int32, _atypes.Int64) +TV_RaggedBincount_T = TypeVar("TV_RaggedBincount_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def ragged_bincount(splits: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_RaggedBincount_Tidx], size: Annotated[Any, TV_RaggedBincount_Tidx], weights: Annotated[Any, TV_RaggedBincount_T], binary_output:bool=False, name=None) -> Annotated[Any, TV_RaggedBincount_T]: + r"""Counts the number of occurrences of each value in an integer array. + + Outputs a vector with length `size` and the same dtype as `weights`. If + `weights` are empty, then index `i` stores the number of times the value `i` is + counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + the value in `weights` at each index where the corresponding value in `arr` is + `i`. + + Values in `arr` outside of the range [0, size) are ignored. + + Args: + splits: A `Tensor` of type `int64`. 1D int64 `Tensor`. + values: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2D int `Tensor`. + size: A `Tensor`. Must have the same type as `values`. + non-negative int scalar `Tensor`. + weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + is an int32, int64, float32, or float64 `Tensor` with the same + shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + equal to 1. + binary_output: An optional `bool`. 
Defaults to `False`. + bool; Whether the kernel should count the appearance or number of occurrences. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `weights`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedBincount", name, splits, values, size, weights, + "binary_output", binary_output) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ragged_bincount_eager_fallback( + splits, values, size, weights, binary_output=binary_output, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedBincount", splits=splits, values=values, size=size, + weights=weights, binary_output=binary_output, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tidx", _op._get_attr_type("Tidx"), "T", + _op._get_attr_type("T"), "binary_output", + _op._get_attr_bool("binary_output")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RaggedBincount = tf_export("raw_ops.RaggedBincount")(_ops.to_raw_op(ragged_bincount)) + + +def ragged_bincount_eager_fallback(splits: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_RaggedBincount_Tidx], size: Annotated[Any, TV_RaggedBincount_Tidx], weights: Annotated[Any, TV_RaggedBincount_T], binary_output: bool, name, ctx) -> Annotated[Any, TV_RaggedBincount_T]: + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([values, size], ctx, [_dtypes.int32, _dtypes.int64, ]) + (values, size) = _inputs_Tidx + _attr_T, (weights,) = _execute.args_to_matching_eager([weights], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + splits = _ops.convert_to_tensor(splits, _dtypes.int64) + _inputs_flat = [splits, values, size, weights] + _attrs = ("Tidx", _attr_Tidx, "T", _attr_T, "binary_output", binary_output) + _result = _execute.execute(b"RaggedBincount", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RaggedBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Range_Tidx = TypeVar("TV_Range_Tidx", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32) + +def _range(start: Annotated[Any, TV_Range_Tidx], limit: Annotated[Any, TV_Range_Tidx], delta: Annotated[Any, TV_Range_Tidx], name=None) -> Annotated[Any, TV_Range_Tidx]: + r"""Creates a sequence of numbers. + + This operation creates a sequence of numbers that begins at `start` and + extends by increments of `delta` up to but not including `limit`. + + For example: + + ``` + # 'start' is 3 + # 'limit' is 18 + # 'delta' is 3 + tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + ``` + + Args: + start: A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint16`, `uint32`. + 0-D (scalar). First entry in the sequence. + limit: A `Tensor`. Must have the same type as `start`. + 0-D (scalar). Upper limit of sequence, exclusive. + delta: A `Tensor`. Must have the same type as `start`. + 0-D (scalar). Optional. Default is 1. Number that increments `start`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `start`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Range", name, start, limit, delta) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _range_eager_fallback( + start, limit, delta, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Range", start=start, limit=limit, delta=delta, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Range", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Range = tf_export("raw_ops.Range")(_ops.to_raw_op(_range)) + + +def _range_eager_fallback(start: Annotated[Any, TV_Range_Tidx], limit: Annotated[Any, TV_Range_Tidx], delta: Annotated[Any, TV_Range_Tidx], name, ctx) -> Annotated[Any, TV_Range_Tidx]: + _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([start, limit, delta], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint16, _dtypes.uint32, ], _dtypes.int32) + (start, limit, delta) = _inputs_Tidx + _inputs_flat = [start, limit, delta] + _attrs = ("Tidx", _attr_Tidx) + _result = _execute.execute(b"Range", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Range", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Real_T = TypeVar("TV_Real_T", _atypes.Complex128, _atypes.Complex64) +TV_Real_Tout = TypeVar("TV_Real_Tout", _atypes.Float32, _atypes.Float64) + +def real(input: Annotated[Any, TV_Real_T], Tout:TV_Real_Tout=_dtypes.float32, name=None) -> Annotated[Any, TV_Real_Tout]: + r"""Returns the real part of a complex number. + + Given a tensor `input` of complex numbers, this operation returns a tensor of + type `float` that is the real part of each element in `input`. All elements in + `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real + part returned by this operation and *b* is the imaginary part. + + For example: + + ``` + # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + tf.real(input) ==> [-2.25, 3.25] + ``` + + Args: + input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. + Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `Tout`. 
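+
+ Selecting the output precision explicitly (an illustrative sketch,
+ assuming eager execution and the `tf.raw_ops.Real` endpoint exported
+ below):
+
+ ```
+ import tensorflow as tf
+ z = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex128)
+ tf.raw_ops.Real(input=z, Tout=tf.float64)  # ==> [-2.25, 3.25]
+ ```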
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Real", name, input, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return real_eager_fallback( + input, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Real", input=input, Tout=Tout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tout", + _op._get_attr_type("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Real", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Real = tf_export("raw_ops.Real")(_ops.to_raw_op(real)) + + +def real_eager_fallback(input: Annotated[Any, TV_Real_T], Tout: TV_Real_Tout, name, ctx) -> Annotated[Any, TV_Real_Tout]: + if Tout is None: + Tout = _dtypes.float32 + Tout = _execute.make_type(Tout, "Tout") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.complex64, _dtypes.complex128, ], _dtypes.complex64) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "Tout", Tout) + _result = _execute.execute(b"Real", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Real", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RealDiv_T = TypeVar("TV_RealDiv_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('realdiv') +def real_div(x: Annotated[Any, TV_RealDiv_T], y: Annotated[Any, TV_RealDiv_T], name=None) -> Annotated[Any, TV_RealDiv_T]: + r"""Returns x / y element-wise for real types. + + If `x` and `y` are reals, this will return the floating-point division. + + *NOTE*: `Div` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RealDiv", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_real_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return real_div_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + real_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_real_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RealDiv", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + real_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RealDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RealDiv = tf_export("raw_ops.RealDiv")(_ops.to_raw_op(real_div)) +_dispatcher_for_real_div = real_div._tf_type_based_dispatcher.Dispatch + + +def real_div_eager_fallback(x: Annotated[Any, TV_RealDiv_T], y: Annotated[Any, TV_RealDiv_T], name, ctx) -> Annotated[Any, TV_RealDiv_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.uint32, _dtypes.uint64, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RealDiv", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RealDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Reciprocal_T = TypeVar("TV_Reciprocal_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.reciprocal', v1=['math.reciprocal', 'reciprocal']) +@deprecated_endpoints('reciprocal') +def reciprocal(x: Annotated[Any, TV_Reciprocal_T], name=None) -> Annotated[Any, TV_Reciprocal_T]: + r"""Computes the reciprocal of x element-wise. + + I.e., \\(y = 1 / x\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Reciprocal", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_reciprocal( + (x, name,), None) + if _result is not NotImplemented: + return _result + return reciprocal_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + reciprocal, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_reciprocal( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Reciprocal", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + reciprocal, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Reciprocal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Reciprocal = tf_export("raw_ops.Reciprocal")(_ops.to_raw_op(reciprocal)) +_dispatcher_for_reciprocal = reciprocal._tf_type_based_dispatcher.Dispatch + + +def reciprocal_eager_fallback(x: Annotated[Any, TV_Reciprocal_T], name, ctx) -> Annotated[Any, TV_Reciprocal_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Reciprocal", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Reciprocal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ReciprocalGrad_T = TypeVar("TV_ReciprocalGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def reciprocal_grad(y: Annotated[Any, TV_ReciprocalGrad_T], dy: Annotated[Any, TV_ReciprocalGrad_T], name=None) -> Annotated[Any, TV_ReciprocalGrad_T]: + r"""Computes the gradient for the inverse of `x` wrt its input. + + Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + is the corresponding input gradient. + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ReciprocalGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return reciprocal_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
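+ # Graph-mode path. On the docstring formula: with y = 1/x,
+ # dy/dx = -1/x**2 = -y**2, so the incoming gradient `dy` is scaled by
+ # -y*y to give the gradient with respect to x.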
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ReciprocalGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ReciprocalGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ReciprocalGrad = tf_export("raw_ops.ReciprocalGrad")(_ops.to_raw_op(reciprocal_grad)) + + +def reciprocal_grad_eager_fallback(y: Annotated[Any, TV_ReciprocalGrad_T], dy: Annotated[Any, TV_ReciprocalGrad_T], name, ctx) -> Annotated[Any, TV_ReciprocalGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"ReciprocalGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ReciprocalGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_RequantizationRangeOutput = collections.namedtuple( + "RequantizationRange", + ["output_min", "output_max"]) + + +TV_RequantizationRange_Tinput = TypeVar("TV_RequantizationRange_Tinput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def requantization_range(input: Annotated[Any, TV_RequantizationRange_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], name=None): + r"""Computes a range that covers the actual values present in a quantized tensor. + + Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + range that covers the actual values present in that tensor. This op is typically + used to produce the `requested_output_min` and `requested_output_max` for + `Requantize`. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + input_min: A `Tensor` of type `float32`. + The float value that the minimum quantized input value represents. + input_max: A `Tensor` of type `float32`. + The float value that the maximum quantized input value represents. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output_min, output_max). + + output_min: A `Tensor` of type `float32`. + output_max: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RequantizationRange", name, input, input_min, input_max) + _result = _RequantizationRangeOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return requantization_range_eager_fallback( + input, input_min, input_max, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RequantizationRange", input=input, input_min=input_min, + input_max=input_max, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tinput", _op._get_attr_type("Tinput")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RequantizationRange", _inputs_flat, _attrs, _result) + _result = _RequantizationRangeOutput._make(_result) + return _result + +RequantizationRange = tf_export("raw_ops.RequantizationRange")(_ops.to_raw_op(requantization_range)) + + +def requantization_range_eager_fallback(input: Annotated[Any, TV_RequantizationRange_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], name, ctx): + _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) + input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) + _inputs_flat = [input, input_min, input_max] + _attrs = ("Tinput", _attr_Tinput) + _result = _execute.execute(b"RequantizationRange", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RequantizationRange", _inputs_flat, _attrs, _result) + _result = _RequantizationRangeOutput._make(_result) + return _result + +_RequantizationRangePerChannelOutput = collections.namedtuple( + "RequantizationRangePerChannel", + ["output_min", "output_max"]) + + +TV_RequantizationRangePerChannel_T = TypeVar("TV_RequantizationRangePerChannel_T", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def requantization_range_per_channel(input: Annotated[Any, TV_RequantizationRangePerChannel_T], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], clip_value_max: float, name=None): + r"""Computes requantization range per channel. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + The original input tensor. + input_min: A `Tensor` of type `float32`. + The minimum value of the input tensor + input_max: A `Tensor` of type `float32`. + The maximum value of the input tensor. + clip_value_max: A `float`. + The maximum value of the output that needs to be clipped. + Example: set this to 6 for Relu6. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output_min, output_max). + + output_min: A `Tensor` of type `float32`. + output_max: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RequantizationRangePerChannel", name, input, input_min, + input_max, "clip_value_max", clip_value_max) + _result = _RequantizationRangePerChannelOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return requantization_range_per_channel_eager_fallback( + input, input_min, input_max, clip_value_max=clip_value_max, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
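+ # Graph-mode path: `clip_value_max` is a required float attr (e.g. 6.0
+ # when the consumer is Relu6, per the docstring) and is validated below
+ # before the node is created.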
+ clip_value_max = _execute.make_float(clip_value_max, "clip_value_max") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RequantizationRangePerChannel", input=input, input_min=input_min, + input_max=input_max, + clip_value_max=clip_value_max, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "clip_value_max", + _op.get_attr("clip_value_max")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RequantizationRangePerChannel", _inputs_flat, _attrs, _result) + _result = _RequantizationRangePerChannelOutput._make(_result) + return _result + +RequantizationRangePerChannel = tf_export("raw_ops.RequantizationRangePerChannel")(_ops.to_raw_op(requantization_range_per_channel)) + + +def requantization_range_per_channel_eager_fallback(input: Annotated[Any, TV_RequantizationRangePerChannel_T], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], clip_value_max: float, name, ctx): + clip_value_max = _execute.make_float(clip_value_max, "clip_value_max") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ], _dtypes.qint32) + input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) + input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) + _inputs_flat = [input, input_min, input_max] + _attrs = ("T", _attr_T, "clip_value_max", clip_value_max) + _result = _execute.execute(b"RequantizationRangePerChannel", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RequantizationRangePerChannel", _inputs_flat, _attrs, _result) + _result = _RequantizationRangePerChannelOutput._make(_result) + return _result + +_RequantizeOutput = collections.namedtuple( + "Requantize", + ["output", "output_min", "output_max"]) + + +TV_Requantize_Tinput = TypeVar("TV_Requantize_Tinput", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) +TV_Requantize_out_type = TypeVar("TV_Requantize_out_type", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def requantize(input: Annotated[Any, TV_Requantize_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], requested_output_min: Annotated[Any, _atypes.Float32], requested_output_max: Annotated[Any, _atypes.Float32], out_type: TV_Requantize_out_type, name=None): + r"""Converts the quantized `input` tensor into a lower-precision `output`. + + Converts the quantized `input` tensor into a lower-precision `output`, using the + output range specified with `requested_output_min` and `requested_output_max`. + + `[input_min, input_max]` are scalar floats that specify the range for the float + interpretation of the `input` data. For example, if `input_min` is -1.0f and + `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + input_min: A `Tensor` of type `float32`. + The float value that the minimum quantized input value represents. + input_max: A `Tensor` of type `float32`. + The float value that the maximum quantized input value represents. + requested_output_min: A `Tensor` of type `float32`. + The float value that the minimum quantized output value represents. 
+ requested_output_max: A `Tensor` of type `float32`. + The float value that the maximum quantized output value represents. + out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. + The type of the output. Should be a lower bit depth than Tinput. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, output_min, output_max). + + output: A `Tensor` of type `out_type`. + output_min: A `Tensor` of type `float32`. + output_max: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Requantize", name, input, input_min, input_max, + requested_output_min, requested_output_max, "out_type", out_type) + _result = _RequantizeOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return requantize_eager_fallback( + input, input_min, input_max, requested_output_min, + requested_output_max, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + out_type = _execute.make_type(out_type, "out_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Requantize", input=input, input_min=input_min, input_max=input_max, + requested_output_min=requested_output_min, + requested_output_max=requested_output_max, + out_type=out_type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", + _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Requantize", _inputs_flat, _attrs, _result) + _result = _RequantizeOutput._make(_result) + return _result + +Requantize = tf_export("raw_ops.Requantize")(_ops.to_raw_op(requantize)) + + +def requantize_eager_fallback(input: Annotated[Any, TV_Requantize_Tinput], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], requested_output_min: Annotated[Any, _atypes.Float32], requested_output_max: Annotated[Any, _atypes.Float32], out_type: TV_Requantize_out_type, name, ctx): + out_type = _execute.make_type(out_type, "out_type") + _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) + input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) + input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) + requested_output_min = _ops.convert_to_tensor(requested_output_min, _dtypes.float32) + requested_output_max = _ops.convert_to_tensor(requested_output_max, _dtypes.float32) + _inputs_flat = [input, input_min, input_max, requested_output_min, requested_output_max] + _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) + _result = _execute.execute(b"Requantize", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Requantize", _inputs_flat, _attrs, _result) + _result = _RequantizeOutput._make(_result) + return _result + +_RequantizePerChannelOutput = collections.namedtuple( + "RequantizePerChannel", + ["output", "output_min", "output_max"]) + + +TV_RequantizePerChannel_T = TypeVar("TV_RequantizePerChannel_T", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) 
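+# --- Editor's note (illustrative; restates the Requantize docstring above in
+# formula form, not part of the generated file). With input_min = -1.0,
+# input_max = 1.0 and quint16 data, a quantized level q is interpreted as
+#   value(q) = input_min + q * (input_max - input_min) / 65535
+# so q = 0 maps to -1.0 and q = 65535 maps to 1.0, matching the example above.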
+TV_RequantizePerChannel_out_type = TypeVar("TV_RequantizePerChannel_out_type", _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8) + +def requantize_per_channel(input: Annotated[Any, TV_RequantizePerChannel_T], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], requested_output_min: Annotated[Any, _atypes.Float32], requested_output_max: Annotated[Any, _atypes.Float32], out_type:TV_RequantizePerChannel_out_type=_dtypes.quint8, name=None): + r"""Requantizes input with min and max values known per channel. + + Args: + input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. + The original input tensor. + input_min: A `Tensor` of type `float32`. + The minimum value of the input tensor + input_max: A `Tensor` of type `float32`. + The maximum value of the input tensor. + requested_output_min: A `Tensor` of type `float32`. + The minimum value of the output tensor requested. + requested_output_max: A `Tensor` of type `float32`. + The maximum value of the output tensor requested. + out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. + The quantized type of output tensor that needs to be converted. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, output_min, output_max). + + output: A `Tensor` of type `out_type`. + output_min: A `Tensor` of type `float32`. + output_max: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RequantizePerChannel", name, input, input_min, input_max, + requested_output_min, requested_output_max, "out_type", out_type) + _result = _RequantizePerChannelOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return requantize_per_channel_eager_fallback( + input, input_min, input_max, requested_output_min, + requested_output_max, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
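+ # --- Editor's sketch (illustrative, not part of the generated wrapper). A
+ # hypothetical eager call through tf.raw_ops that narrows a quantized tensor
+ # `acc` (with float32 range tensors `in_min` / `in_max`) to quint8 over a
+ # requested output range of [0.0, 6.0]:
+ #   out, out_min, out_max = tf.raw_ops.RequantizePerChannel(
+ #       input=acc, input_min=in_min, input_max=in_max,
+ #       requested_output_min=tf.constant(0.0),
+ #       requested_output_max=tf.constant(6.0),
+ #       out_type=tf.quint8)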
+ if out_type is None: + out_type = _dtypes.quint8 + out_type = _execute.make_type(out_type, "out_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RequantizePerChannel", input=input, input_min=input_min, + input_max=input_max, + requested_output_min=requested_output_min, + requested_output_max=requested_output_max, + out_type=out_type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "out_type", + _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RequantizePerChannel", _inputs_flat, _attrs, _result) + _result = _RequantizePerChannelOutput._make(_result) + return _result + +RequantizePerChannel = tf_export("raw_ops.RequantizePerChannel")(_ops.to_raw_op(requantize_per_channel)) + + +def requantize_per_channel_eager_fallback(input: Annotated[Any, TV_RequantizePerChannel_T], input_min: Annotated[Any, _atypes.Float32], input_max: Annotated[Any, _atypes.Float32], requested_output_min: Annotated[Any, _atypes.Float32], requested_output_max: Annotated[Any, _atypes.Float32], out_type: TV_RequantizePerChannel_out_type, name, ctx): + if out_type is None: + out_type = _dtypes.quint8 + out_type = _execute.make_type(out_type, "out_type") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ], _dtypes.qint32) + input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) + input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) + requested_output_min = _ops.convert_to_tensor(requested_output_min, _dtypes.float32) + requested_output_max = _ops.convert_to_tensor(requested_output_max, _dtypes.float32) + _inputs_flat = [input, input_min, input_max, requested_output_min, requested_output_max] + _attrs = ("T", _attr_T, "out_type", out_type) + _result = _execute.execute(b"RequantizePerChannel", 3, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RequantizePerChannel", _inputs_flat, _attrs, _result) + _result = _RequantizePerChannelOutput._make(_result) + return _result + + +TV_Rint_T = TypeVar("TV_Rint_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.rint', v1=['math.rint', 'rint']) +@deprecated_endpoints('rint') +def rint(x: Annotated[Any, TV_Rint_T], name=None) -> Annotated[Any, TV_Rint_T]: + r"""Returns element-wise integer closest to x. + + If the result is midway between two representable values, + the even representable is chosen. + For example: + + ``` + rint(-1.5) ==> -2.0 + rint(0.5000001) ==> 1.0 + rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Rint", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rint( + (x, name,), None) + if _result is not NotImplemented: + return _result + return rint_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rint, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rint( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Rint", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rint, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Rint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Rint = tf_export("raw_ops.Rint")(_ops.to_raw_op(rint)) +_dispatcher_for_rint = rint._tf_type_based_dispatcher.Dispatch + + +def rint_eager_fallback(x: Annotated[Any, TV_Rint_T], name, ctx) -> Annotated[Any, TV_Rint_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Rint", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Rint", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Round_T = TypeVar("TV_Round_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +def round(x: Annotated[Any, TV_Round_T], name=None) -> Annotated[Any, TV_Round_T]: + r"""Rounds the values of a tensor to the nearest integer, element-wise. + + Rounds half to even. Also known as bankers rounding. If you want to round + according to the current system rounding mode use std::cint. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Round", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return round_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Round", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Round", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Round = tf_export("raw_ops.Round")(_ops.to_raw_op(round)) + + +def round_eager_fallback(x: Annotated[Any, TV_Round_T], name, ctx) -> Annotated[Any, TV_Round_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Round", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Round", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Rsqrt_T = TypeVar("TV_Rsqrt_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def rsqrt(x: Annotated[Any, TV_Rsqrt_T], name=None) -> Annotated[Any, TV_Rsqrt_T]: + r"""Computes reciprocal of square root of x element-wise. + + I.e., \\(y = 1 / \sqrt{x}\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Rsqrt", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rsqrt_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Rsqrt", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Rsqrt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Rsqrt = tf_export("raw_ops.Rsqrt")(_ops.to_raw_op(rsqrt)) + + +def rsqrt_eager_fallback(x: Annotated[Any, TV_Rsqrt_T], name, ctx) -> Annotated[Any, TV_Rsqrt_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Rsqrt", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Rsqrt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RsqrtGrad_T = TypeVar("TV_RsqrtGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def rsqrt_grad(y: Annotated[Any, TV_RsqrtGrad_T], dy: Annotated[Any, TV_RsqrtGrad_T], name=None) -> Annotated[Any, TV_RsqrtGrad_T]: + r"""Computes the gradient for the rsqrt of `x` wrt its input. + + Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` + is the corresponding input gradient. + + Args: + y: A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RsqrtGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rsqrt_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RsqrtGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RsqrtGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RsqrtGrad = tf_export("raw_ops.RsqrtGrad")(_ops.to_raw_op(rsqrt_grad)) + + +def rsqrt_grad_eager_fallback(y: Annotated[Any, TV_RsqrtGrad_T], dy: Annotated[Any, TV_RsqrtGrad_T], name, ctx) -> Annotated[Any, TV_RsqrtGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RsqrtGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RsqrtGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentMax_T = TypeVar("TV_SegmentMax_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentMax_Tindices = TypeVar("TV_SegmentMax_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.segment_max', v1=['math.segment_max', 'segment_max']) +@deprecated_endpoints('segment_max') +def segment_max(data: Annotated[Any, TV_SegmentMax_T], segment_ids: Annotated[Any, TV_SegmentMax_Tindices], name=None) -> Annotated[Any, TV_SegmentMax_T]: + r"""Computes the maximum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \max_j(data_j)\\) where `max` is over `j` such + that `segment_ids[j] == i`. + + If the max is empty for a given segment ID `i`, `output[i] = 0`. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + +
+ +
+ + For example: + + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy() + array([[4, 3, 3, 4], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentMax", name, data, segment_ids) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_segment_max( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + return segment_max_eager_fallback( + data, segment_ids, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_max, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_segment_max( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentMax", data=data, segment_ids=segment_ids, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_max, (), dict(data=data, segment_ids=segment_ids, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentMax = tf_export("raw_ops.SegmentMax")(_ops.to_raw_op(segment_max)) +_dispatcher_for_segment_max = segment_max._tf_type_based_dispatcher.Dispatch + + +def segment_max_eager_fallback(data: Annotated[Any, TV_SegmentMax_T], segment_ids: Annotated[Any, TV_SegmentMax_Tindices], name, ctx) -> Annotated[Any, TV_SegmentMax_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [data, segment_ids] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"SegmentMax", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentMaxV2_T = TypeVar("TV_SegmentMaxV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentMaxV2_Tindices = TypeVar("TV_SegmentMaxV2_Tindices", _atypes.Int32, _atypes.Int64) +TV_SegmentMaxV2_Tnumsegments = TypeVar("TV_SegmentMaxV2_Tnumsegments", _atypes.Int32, _atypes.Int64) + +def segment_max_v2(data: Annotated[Any, TV_SegmentMaxV2_T], segment_ids: Annotated[Any, TV_SegmentMaxV2_Tindices], num_segments: Annotated[Any, TV_SegmentMaxV2_Tnumsegments], name=None) -> Annotated[Any, TV_SegmentMaxV2_T]: + r"""Computes the maximum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \max_j(data_j)\\) where `max` is over `j` such + that `segment_ids[j] == i`. + + If the maximum is empty for a given segment ID `i`, it outputs the smallest + possible value for the specific numeric type, + `output[i] = numeric_limits::lowest()`. + + Note: That this op is currently only supported with jit_compile=True. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + + The only difference with SegmentMax is the additional input `num_segments`. + This helps in evaluating the output shape in compile time. + `num_segments` should be consistent with segment_ids. + e.g. 
Max(segment_ids) should be equal to `num_segments` - 1 for a 1-d segment_ids + With inconsistent num_segments, the op still runs. only difference is, + the output takes the size of num_segments irrespective of size of segment_ids and data. + for num_segments less than expected output size, the last elements are ignored + for num_segments more than the expected output size, last elements are assigned + smallest possible value for the specific numeric type. + + For example: + + >>> @tf.function(jit_compile=True) + ... def test(c): + ... return tf.raw_ops.SegmentMaxV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> test(c).numpy() + array([[4, 3, 3, 4], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + The values must be less than `num_segments`. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentMaxV2", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return segment_max_v2_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentMaxV2", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentMaxV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentMaxV2 = tf_export("raw_ops.SegmentMaxV2")(_ops.to_raw_op(segment_max_v2)) + + +def segment_max_v2_eager_fallback(data: Annotated[Any, TV_SegmentMaxV2_T], segment_ids: Annotated[Any, TV_SegmentMaxV2_Tindices], num_segments: Annotated[Any, TV_SegmentMaxV2_Tnumsegments], name, ctx) -> Annotated[Any, TV_SegmentMaxV2_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"SegmentMaxV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentMaxV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentMean_T = TypeVar("TV_SegmentMean_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentMean_Tindices = TypeVar("TV_SegmentMean_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.segment_mean', v1=['math.segment_mean', 'segment_mean']) +@deprecated_endpoints('segment_mean') +def segment_mean(data: Annotated[Any, TV_SegmentMean_T], segment_ids: Annotated[Any, TV_SegmentMean_Tindices], name=None) -> Annotated[Any, TV_SegmentMean_T]: + r"""Computes the mean along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is + over `j` such that `segment_ids[j] == i` and `N` is the total number of + values summed. + + If the mean is empty for a given segment ID `i`, `output[i] = 0`. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as a smaller following index when computing the numerator + of the mean. + +
+ +
+ + For example: + + >>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy() + array([[2.5, 2.5, 2.5, 2.5], + [5., 6., 7., 8.]], dtype=float32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentMean", name, data, segment_ids) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_segment_mean( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + return segment_mean_eager_fallback( + data, segment_ids, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_mean, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_segment_mean( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentMean", data=data, segment_ids=segment_ids, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_mean, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentMean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentMean = tf_export("raw_ops.SegmentMean")(_ops.to_raw_op(segment_mean)) +_dispatcher_for_segment_mean = segment_mean._tf_type_based_dispatcher.Dispatch + + +def segment_mean_eager_fallback(data: Annotated[Any, TV_SegmentMean_T], segment_ids: Annotated[Any, TV_SegmentMean_Tindices], name, ctx) -> Annotated[Any, TV_SegmentMean_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [data, segment_ids] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"SegmentMean", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentMean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentMin_T = TypeVar("TV_SegmentMin_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentMin_Tindices = TypeVar("TV_SegmentMin_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.segment_min', v1=['math.segment_min', 'segment_min']) +@deprecated_endpoints('segment_min') +def segment_min(data: Annotated[Any, TV_SegmentMin_T], segment_ids: Annotated[Any, TV_SegmentMin_Tindices], name=None) -> Annotated[Any, TV_SegmentMin_T]: + r"""Computes the minimum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \min_j(data_j)\\) where `min` is over `j` such + that `segment_ids[j] == i`. + + If the min is empty for a given segment ID `i`, `output[i] = 0`. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + +
+ +
+ + For example: + + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy() + array([[1, 2, 2, 1], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentMin", name, data, segment_ids) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_segment_min( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + return segment_min_eager_fallback( + data, segment_ids, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_min, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_segment_min( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentMin", data=data, segment_ids=segment_ids, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_min, (), dict(data=data, segment_ids=segment_ids, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentMin = tf_export("raw_ops.SegmentMin")(_ops.to_raw_op(segment_min)) +_dispatcher_for_segment_min = segment_min._tf_type_based_dispatcher.Dispatch + + +def segment_min_eager_fallback(data: Annotated[Any, TV_SegmentMin_T], segment_ids: Annotated[Any, TV_SegmentMin_Tindices], name, ctx) -> Annotated[Any, TV_SegmentMin_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [data, segment_ids] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"SegmentMin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentMinV2_T = TypeVar("TV_SegmentMinV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentMinV2_Tindices = TypeVar("TV_SegmentMinV2_Tindices", _atypes.Int32, _atypes.Int64) +TV_SegmentMinV2_Tnumsegments = TypeVar("TV_SegmentMinV2_Tnumsegments", _atypes.Int32, _atypes.Int64) + +def segment_min_v2(data: Annotated[Any, TV_SegmentMinV2_T], segment_ids: Annotated[Any, TV_SegmentMinV2_Tindices], num_segments: Annotated[Any, TV_SegmentMinV2_Tnumsegments], name=None) -> Annotated[Any, TV_SegmentMinV2_T]: + r"""Computes the minimum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \min_j(data_j)\\) where `min` is over `j` such + that `segment_ids[j] == i`. + + If the minimum is empty for a given segment ID `i`, it outputs the largest + possible value for the specific numeric type, + `output[i] = numeric_limits::max()`. + + Note: That this op is currently only supported with jit_compile=True. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + + The only difference with SegmentMin is the additional input `num_segments`. + This helps in evaluating the output shape in compile time. + `num_segments` should be consistent with segment_ids. + e.g. 
Max(segment_ids) should be equal to `num_segments` - 1 for a 1-d segment_ids + With inconsistent num_segments, the op still runs. only difference is, + the output takes the size of num_segments irrespective of size of segment_ids and data. + for num_segments less than expected output size, the last elements are ignored + for num_segments more than the expected output size, last elements are assigned + the largest possible value for the specific numeric type. + + For example: + + >>> @tf.function(jit_compile=True) + ... def test(c): + ... return tf.raw_ops.SegmentMinV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> test(c).numpy() + array([[1, 2, 2, 1], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + The values must be less than `num_segments`. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentMinV2", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return segment_min_v2_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentMinV2", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentMinV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentMinV2 = tf_export("raw_ops.SegmentMinV2")(_ops.to_raw_op(segment_min_v2)) + + +def segment_min_v2_eager_fallback(data: Annotated[Any, TV_SegmentMinV2_T], segment_ids: Annotated[Any, TV_SegmentMinV2_Tindices], num_segments: Annotated[Any, TV_SegmentMinV2_Tnumsegments], name, ctx) -> Annotated[Any, TV_SegmentMinV2_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"SegmentMinV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentMinV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentProd_T = TypeVar("TV_SegmentProd_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentProd_Tindices = TypeVar("TV_SegmentProd_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.segment_prod', v1=['math.segment_prod', 'segment_prod']) +@deprecated_endpoints('segment_prod') +def segment_prod(data: Annotated[Any, TV_SegmentProd_T], segment_ids: Annotated[Any, TV_SegmentProd_Tindices], name=None) -> Annotated[Any, TV_SegmentProd_T]: + r"""Computes the product along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \prod_j data_j\\) where the product is over `j` such + that `segment_ids[j] == i`. + + If the product is empty for a given segment ID `i`, `output[i] = 1`. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + +
+ +
+ + For example: + + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy() + array([[4, 6, 6, 4], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentProd", name, data, segment_ids) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_segment_prod( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + return segment_prod_eager_fallback( + data, segment_ids, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_prod, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_segment_prod( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentProd", data=data, segment_ids=segment_ids, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_prod, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentProd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentProd = tf_export("raw_ops.SegmentProd")(_ops.to_raw_op(segment_prod)) +_dispatcher_for_segment_prod = segment_prod._tf_type_based_dispatcher.Dispatch + + +def segment_prod_eager_fallback(data: Annotated[Any, TV_SegmentProd_T], segment_ids: Annotated[Any, TV_SegmentProd_Tindices], name, ctx) -> Annotated[Any, TV_SegmentProd_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [data, segment_ids] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"SegmentProd", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentProd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentProdV2_T = TypeVar("TV_SegmentProdV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentProdV2_Tindices = TypeVar("TV_SegmentProdV2_Tindices", _atypes.Int32, _atypes.Int64) +TV_SegmentProdV2_Tnumsegments = TypeVar("TV_SegmentProdV2_Tnumsegments", _atypes.Int32, _atypes.Int64) + +def segment_prod_v2(data: Annotated[Any, TV_SegmentProdV2_T], segment_ids: Annotated[Any, TV_SegmentProdV2_Tindices], num_segments: Annotated[Any, TV_SegmentProdV2_Tnumsegments], name=None) -> Annotated[Any, TV_SegmentProdV2_T]: + r"""Computes the product along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \prod_j data_j\\) where the product is over `j` such + that `segment_ids[j] == i`. + + If the product is empty for a given segment ID `i`, `output[i] = 1`. + + Note: That this op is currently only supported with jit_compile=True. + + The only difference with SegmentProd is the additional input `num_segments`. + This helps in evaluating the output shape in compile time. + `num_segments` should be consistent with segment_ids. + e.g. Max(segment_ids) should be equal to `num_segments` - 1 for a 1-d segment_ids + With inconsistent num_segments, the op still runs. only difference is, + the output takes the size of num_segments irrespective of size of segment_ids and data.
+ for num_segments less than expected output size, the last elements are ignored + for num_segments more than the expected output size, last elements are assigned 1. + + For example: + + >>> @tf.function(jit_compile=True) + ... def test(c): + ... return tf.raw_ops.SegmentProdV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> test(c).numpy() + array([[4, 6, 6, 4], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + The values must be less than `num_segments`. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentProdV2", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return segment_prod_v2_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentProdV2", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentProdV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentProdV2 = tf_export("raw_ops.SegmentProdV2")(_ops.to_raw_op(segment_prod_v2)) + + +def segment_prod_v2_eager_fallback(data: Annotated[Any, TV_SegmentProdV2_T], segment_ids: Annotated[Any, TV_SegmentProdV2_Tindices], num_segments: Annotated[Any, TV_SegmentProdV2_Tnumsegments], name, ctx) -> Annotated[Any, TV_SegmentProdV2_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"SegmentProdV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentProdV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentSum_T = TypeVar("TV_SegmentSum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentSum_Tindices = TypeVar("TV_SegmentSum_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.segment_sum', v1=['math.segment_sum', 'segment_sum']) +@deprecated_endpoints('segment_sum') +def segment_sum(data: Annotated[Any, TV_SegmentSum_T], segment_ids: Annotated[Any, TV_SegmentSum_Tindices], name=None) -> Annotated[Any, TV_SegmentSum_T]: + r"""Computes the sum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \sum_j data_j\\) where sum is over `j` such + that `segment_ids[j] == i`. + + If the sum is empty for a given segment ID `i`, `output[i] = 0`. + + Caution: On CPU, values in `segment_ids` are always validated to be sorted, + and an error is thrown for indices that are not increasing. On GPU, this + does not throw an error for unsorted indices. On GPU, out-of-order indices + result in safe but unspecified behavior, which may include treating + out-of-order indices as the same as a smaller following index. + +
+ +
+ + For example: + + >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy() + array([[5, 5, 5, 5], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentSum", name, data, segment_ids) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_segment_sum( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + return segment_sum_eager_fallback( + data, segment_ids, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_sum, (), dict(data=data, segment_ids=segment_ids, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_segment_sum( + (data, segment_ids, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentSum", data=data, segment_ids=segment_ids, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + segment_sum, (), dict(data=data, segment_ids=segment_ids, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentSum = tf_export("raw_ops.SegmentSum")(_ops.to_raw_op(segment_sum)) +_dispatcher_for_segment_sum = segment_sum._tf_type_based_dispatcher.Dispatch + + +def segment_sum_eager_fallback(data: Annotated[Any, TV_SegmentSum_T], segment_ids: Annotated[Any, TV_SegmentSum_Tindices], name, ctx) -> Annotated[Any, TV_SegmentSum_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [data, segment_ids] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"SegmentSum", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SegmentSumV2_T = TypeVar("TV_SegmentSumV2_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SegmentSumV2_Tindices = TypeVar("TV_SegmentSumV2_Tindices", _atypes.Int32, _atypes.Int64) +TV_SegmentSumV2_Tnumsegments = TypeVar("TV_SegmentSumV2_Tnumsegments", _atypes.Int32, _atypes.Int64) + +def segment_sum_v2(data: Annotated[Any, TV_SegmentSumV2_T], segment_ids: Annotated[Any, TV_SegmentSumV2_Tindices], num_segments: Annotated[Any, TV_SegmentSumV2_Tnumsegments], name=None) -> Annotated[Any, TV_SegmentSumV2_T]: + r"""Computes the sum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output_i = \sum_j data_j\\) where sum is over `j` such + that `segment_ids[j] == i`. + + If the sum is empty for a given segment ID `i`, `output[i] = 0`. + + Note that this op is currently only supported with jit_compile=True. + + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor whose size is equal to the size of `data`'s + first dimension. Values should be sorted and can be repeated. 
+ The values must be less than `num_segments`. + + Caution: The values are always validated to be sorted on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SegmentSumV2", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return segment_sum_v2_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SegmentSumV2", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SegmentSumV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SegmentSumV2 = tf_export("raw_ops.SegmentSumV2")(_ops.to_raw_op(segment_sum_v2)) + + +def segment_sum_v2_eager_fallback(data: Annotated[Any, TV_SegmentSumV2_T], segment_ids: Annotated[Any, TV_SegmentSumV2_Tindices], num_segments: Annotated[Any, TV_SegmentSumV2_Tnumsegments], name, ctx) -> Annotated[Any, TV_SegmentSumV2_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"SegmentSumV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SegmentSumV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Select_T = TypeVar("TV_Select_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def select(condition: Annotated[Any, _atypes.Bool], x: Annotated[Any, TV_Select_T], y: Annotated[Any, TV_Select_T], name=None) -> Annotated[Any, TV_Select_T]: + r"""Selects elements from `x` or `y`, depending on `condition`. 
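Referring back to the `SegmentSumV2` wrapper above: since its docstring says the op is only supported with `jit_compile=True`, here is a hedged sketch of calling the raw op under XLA compilation. Whether this actually runs depends on XLA support for the op on your platform; the expected result in the comment follows from the segment-sum definition, not from this file.

```python
import tensorflow as tf

@tf.function(jit_compile=True)  # the docstring says SegmentSumV2 needs jit_compile=True
def summed(data, segment_ids, num_segments):
    return tf.raw_ops.SegmentSumV2(data=data, segment_ids=segment_ids,
                                   num_segments=num_segments)

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
out = summed(data, tf.constant([0, 0, 1]), tf.constant(2))
# Expected: segment 0 = rows 0+1 -> [4., 6.], segment 1 = row 2 -> [5., 6.]
```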
+ + The `x`, and `y` tensors must all have the same shape, and the + output will also have that shape. + + The `condition` tensor must be a scalar if `x` and `y` are scalars. + If `x` and `y` are vectors or higher rank, then `condition` must be either a + scalar, a vector with size matching the first dimension of `x`, or must have + the same shape as `x`. + + The `condition` tensor acts as a mask that chooses, based on the value at each + element, whether the corresponding element / row in the output should be + taken from `x` (if true) or `y` (if false). + + If `condition` is a vector and `x` and `y` are higher rank matrices, then + it chooses which row (outer dimension) to copy from `x` and `y`. + If `condition` has the same shape as `x` and `y`, then it chooses which + element to copy from `x` and `y`. + + For example: + + ```python + # 'condition' tensor is [[True, False] + # [False, True]] + # 't' is [[1, 2], + # [3, 4]] + # 'e' is [[5, 6], + # [7, 8]] + select(condition, t, e) # => [[1, 6], [7, 4]] + + + # 'condition' tensor is [True, False] + # 't' is [[1, 2], + # [3, 4]] + # 'e' is [[5, 6], + # [7, 8]] + select(condition, t, e) ==> [[1, 2], + [7, 8]] + + ``` + + Args: + condition: A `Tensor` of type `bool`. + x: A `Tensor` which may have the same shape as `condition`. + If `condition` is rank 1, `x` may have higher rank, + but its first dimension must match the size of `condition`. + y: A `Tensor` with the same type and shape as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `t`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Select", name, condition, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return select_eager_fallback( + condition, x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
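A short sketch of the two `condition` shapes the `Select` docstring describes (element-wise mask versus row mask), using the raw-op export that appears just below; the printed results match the docstring's own example values.

```python
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
e = tf.constant([[5, 6], [7, 8]])

# Element-wise mask: condition has the same shape as t and e.
m = tf.constant([[True, False], [False, True]])
print(tf.raw_ops.Select(condition=m, t=t, e=e).numpy())  # [[1 6] [7 4]]

# Row mask: a rank-1 condition picks whole rows (the outer dimension).
r = tf.constant([True, False])
print(tf.raw_ops.Select(condition=r, t=t, e=e).numpy())  # [[1 2] [7 8]]
```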
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Select", condition=condition, t=x, e=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Select", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Select = tf_export("raw_ops.Select")(_ops.to_raw_op(select)) + + +def select_eager_fallback(condition: Annotated[Any, _atypes.Bool], x: Annotated[Any, TV_Select_T], y: Annotated[Any, TV_Select_T], name, ctx) -> Annotated[Any, TV_Select_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, []) + (x, y) = _inputs_T + condition = _ops.convert_to_tensor(condition, _dtypes.bool) + _inputs_flat = [condition, x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Select", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Select", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SelectV2_T = TypeVar("TV_SelectV2_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def select_v2(condition: Annotated[Any, _atypes.Bool], t: Annotated[Any, TV_SelectV2_T], e: Annotated[Any, TV_SelectV2_T], name=None) -> Annotated[Any, TV_SelectV2_T]: + r"""TODO: add doc. + + Args: + condition: A `Tensor` of type `bool`. + t: A `Tensor`. + e: A `Tensor`. Must have the same type as `t`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `t`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SelectV2", name, condition, t, e) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return select_v2_eager_fallback( + condition, t, e, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
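The `SelectV2` docstring above is still a TODO. As a hedged editorial note, this kernel behaves like `Select` but with NumPy-style broadcasting of `condition`, `t`, and `e`, which is the behaviour `tf.where` exposes publicly; that reading is mine, not stated in this file. A minimal sketch:

```python
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
e = tf.constant([[5, 6], [7, 8]])

# A scalar condition broadcasts against t and e (selects t everywhere here).
print(tf.raw_ops.SelectV2(condition=tf.constant(True), t=t, e=e).numpy())
# -> [[1 2] [3 4]]

# The three-argument form of tf.where offers the same broadcasting select
# at the public API level.
print(tf.where([[True, False], [False, True]], t, e).numpy())  # [[1 6] [7 4]]
```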
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SelectV2", condition=condition, t=t, e=e, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SelectV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SelectV2 = tf_export("raw_ops.SelectV2")(_ops.to_raw_op(select_v2)) + + +def select_v2_eager_fallback(condition: Annotated[Any, _atypes.Bool], t: Annotated[Any, TV_SelectV2_T], e: Annotated[Any, TV_SelectV2_T], name, ctx) -> Annotated[Any, TV_SelectV2_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([t, e], ctx, []) + (t, e) = _inputs_T + condition = _ops.convert_to_tensor(condition, _dtypes.bool) + _inputs_flat = [condition, t, e] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SelectV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SelectV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sigmoid_T = TypeVar("TV_Sigmoid_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def sigmoid(x: Annotated[Any, TV_Sigmoid_T], name=None) -> Annotated[Any, TV_Sigmoid_T]: + r"""Computes sigmoid of `x` element-wise. + + Specifically, `y = 1 / (1 + exp(-x))`. + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sigmoid", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sigmoid_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sigmoid", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sigmoid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sigmoid = tf_export("raw_ops.Sigmoid")(_ops.to_raw_op(sigmoid)) + + +def sigmoid_eager_fallback(x: Annotated[Any, TV_Sigmoid_T], name, ctx) -> Annotated[Any, TV_Sigmoid_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sigmoid", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sigmoid", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SigmoidGrad_T = TypeVar("TV_SigmoidGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def sigmoid_grad(y: Annotated[Any, TV_SigmoidGrad_T], dy: Annotated[Any, TV_SigmoidGrad_T], name=None) -> Annotated[Any, TV_SigmoidGrad_T]: + r"""Computes the gradient of the sigmoid of `x` wrt its input. 
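For the `Sigmoid` wrapper completed above, a quick numeric check of `y = 1 / (1 + exp(-x))`; the values in the comment are standard sigmoid values, chosen by me for illustration.

```python
import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
y = tf.math.sigmoid(x)
print(y.numpy())  # approximately [0.1192, 0.5, 0.8808]
```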
+ + Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and + `dy` is the corresponding input gradient. + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SigmoidGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sigmoid_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SigmoidGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SigmoidGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SigmoidGrad = tf_export("raw_ops.SigmoidGrad")(_ops.to_raw_op(sigmoid_grad)) + + +def sigmoid_grad_eager_fallback(y: Annotated[Any, TV_SigmoidGrad_T], dy: Annotated[Any, TV_SigmoidGrad_T], name, ctx) -> Annotated[Any, TV_SigmoidGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SigmoidGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SigmoidGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sign_T = TypeVar("TV_Sign_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8) + +def sign(x: Annotated[Any, TV_Sign_T], name=None) -> Annotated[Any, TV_Sign_T]: + r"""Returns an element-wise indication of the sign of a number. + + `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + + For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + + Example usage: + >>> tf.math.sign([0., 2., -3.]) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sign", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sign_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
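The `SigmoidGrad` identity above, `grad = dy * y * (1 - y)`, can be sanity-checked against autodiff. That TensorFlow's registered sigmoid gradient reaches this particular kernel is an assumption on my part; the algebra itself is standard.

```python
import tensorflow as tf

x = tf.constant([0.5, -1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.sigmoid(x)
g = tape.gradient(y, x)

# With dy = 1, the docstring's formula reduces to y * (1 - y).
tf.debugging.assert_near(g, y * (1.0 - y))
```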
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sign", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sign", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sign = tf_export("raw_ops.Sign")(_ops.to_raw_op(sign)) + + +def sign_eager_fallback(x: Annotated[Any, TV_Sign_T], name, ctx) -> Annotated[Any, TV_Sign_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sign", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sign", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sin_T = TypeVar("TV_Sin_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.sin', 'sin') +def sin(x: Annotated[Any, TV_Sin_T], name=None) -> Annotated[Any, TV_Sin_T]: + r"""Computes sine of x element-wise. + + Given an input tensor, this function computes sine of every + element in the tensor. Input range is `(-inf, inf)` and + output range is `[-1,1]`. + + ```python + x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sin", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_sin( + (x, name,), None) + if _result is not NotImplemented: + return _result + return sin_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + sin, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_sin( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
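For the `Sign` wrapper completed above, a short check of both documented cases, real inputs and the complex `x / |x|` rule; the complex value below is my own illustration.

```python
import tensorflow as tf

print(tf.math.sign([0., 2., -3.]).numpy())    # [ 0.  1. -1.]

z = tf.constant([3.0 + 4.0j], dtype=tf.complex128)
print(tf.math.sign(z).numpy())                # [0.6+0.8j], i.e. z / |z| since |z| = 5
```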
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sin", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + sin, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sin = tf_export("raw_ops.Sin")(_ops.to_raw_op(sin)) +_dispatcher_for_sin = sin._tf_type_based_dispatcher.Dispatch + + +def sin_eager_fallback(x: Annotated[Any, TV_Sin_T], name, ctx) -> Annotated[Any, TV_Sin_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sin", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sinh_T = TypeVar("TV_Sinh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.sinh', 'sinh') +def sinh(x: Annotated[Any, TV_Sinh_T], name=None) -> Annotated[Any, TV_Sinh_T]: + r"""Computes hyperbolic sine of x element-wise. + + Given an input tensor, this function computes hyperbolic sine of every + element in the tensor. Input range is `[-inf,inf]` and output range + is `[-inf,inf]`. + + ```python + x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sinh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_sinh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return sinh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + sinh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_sinh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
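Before moving past the `Sin` wrapper completed above, a small sketch confirming the documented output range and the standard identity d(sin)/dx = cos (the identity is textbook math, not something stated in this file).

```python
import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.sin(x)
print(y.numpy())  # all values lie in [-1, 1] as documented
tf.debugging.assert_near(tape.gradient(y, x), tf.math.cos(x))
```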
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sinh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + sinh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sinh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sinh = tf_export("raw_ops.Sinh")(_ops.to_raw_op(sinh)) +_dispatcher_for_sinh = sinh._tf_type_based_dispatcher.Dispatch + + +def sinh_eager_fallback(x: Annotated[Any, TV_Sinh_T], name, ctx) -> Annotated[Any, TV_Sinh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sinh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sinh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SobolSample_dtype = TypeVar("TV_SobolSample_dtype", _atypes.Float32, _atypes.Float64) + +def sobol_sample(dim: Annotated[Any, _atypes.Int32], num_results: Annotated[Any, _atypes.Int32], skip: Annotated[Any, _atypes.Int32], dtype:TV_SobolSample_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_SobolSample_dtype]: + r"""Generates points from the Sobol sequence. + + Creates a Sobol sequence with `num_results` samples. Each sample has dimension + `dim`. Skips the first `skip` samples. + + Args: + dim: A `Tensor` of type `int32`. + Positive scalar `Tensor` representing each sample's dimension. + num_results: A `Tensor` of type `int32`. + Positive scalar `Tensor` of dtype int32. The number of Sobol points to return + in the output. + skip: A `Tensor` of type `int32`. + Positive scalar `Tensor` of dtype int32. The number of initial points of the + Sobol sequence to skip. + dtype: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. + The type of the sample. One of: `float32` or `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SobolSample", name, dim, num_results, skip, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sobol_sample_eager_fallback( + dim, num_results, skip, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
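For the `SobolSample` op documented above, a hedged usage sketch through the raw-op export that follows. The output shape in the comment is my reading of the docstring (one row per sample, `dim` columns); I avoid asserting specific Sobol values.

```python
import tensorflow as tf

# 3 two-dimensional Sobol points, skipping none of the sequence.
pts = tf.raw_ops.SobolSample(dim=tf.constant(2, tf.int32),
                             num_results=tf.constant(3, tf.int32),
                             skip=tf.constant(0, tf.int32),
                             dtype=tf.float64)
print(pts.shape)    # (3, 2): num_results rows, dim columns (my reading of the docstring)
print(pts.numpy())  # quasi-random points in [0, 1)
```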
+ if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SobolSample", dim=dim, num_results=num_results, skip=skip, + dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SobolSample", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SobolSample = tf_export("raw_ops.SobolSample")(_ops.to_raw_op(sobol_sample)) + + +def sobol_sample_eager_fallback(dim: Annotated[Any, _atypes.Int32], num_results: Annotated[Any, _atypes.Int32], skip: Annotated[Any, _atypes.Int32], dtype: TV_SobolSample_dtype, name, ctx) -> Annotated[Any, TV_SobolSample_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + dim = _ops.convert_to_tensor(dim, _dtypes.int32) + num_results = _ops.convert_to_tensor(num_results, _dtypes.int32) + skip = _ops.convert_to_tensor(skip, _dtypes.int32) + _inputs_flat = [dim, num_results, skip] + _attrs = ("dtype", dtype) + _result = _execute.execute(b"SobolSample", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SobolSample", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseBincount_Tidx = TypeVar("TV_SparseBincount_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseBincount_T = TypeVar("TV_SparseBincount_T", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def sparse_bincount(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseBincount_Tidx], dense_shape: Annotated[Any, _atypes.Int64], size: Annotated[Any, TV_SparseBincount_Tidx], weights: Annotated[Any, TV_SparseBincount_T], binary_output:bool=False, name=None) -> Annotated[Any, TV_SparseBincount_T]: + r"""Counts the number of occurrences of each value in an integer array. + + Outputs a vector with length `size` and the same dtype as `weights`. If + `weights` are empty, then index `i` stores the number of times the value `i` is + counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + the value in `weights` at each index where the corresponding value in `arr` is + `i`. + + Values in `arr` outside of the range [0, size) are ignored. + + Args: + indices: A `Tensor` of type `int64`. 2D int64 `Tensor`. + values: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 1D int `Tensor`. + dense_shape: A `Tensor` of type `int64`. 1D int64 `Tensor`. + size: A `Tensor`. Must have the same type as `values`. + non-negative int scalar `Tensor`. + weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. + is an int32, int64, float32, or float64 `Tensor` with the same + shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + equal to 1. + binary_output: An optional `bool`. Defaults to `False`. + bool; Whether the kernel should count the appearance or number of occurrences. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `weights`. 
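To make the counting behaviour described in the `SparseBincount` docstring concrete, a hedged sketch against the raw op. The expected counts follow directly from the docstring's wording; the length-0 `weights` tensor uses its "all weights equal to 1" convention, and the flat output shape for a rank-1 sparse input is my assumption.

```python
import tensorflow as tf

# A rank-1 sparse tensor with values [1, 1, 3] stored at positions 0, 1, 2 of a length-4 vector.
indices = tf.constant([[0], [1], [2]], dtype=tf.int64)
values = tf.constant([1, 1, 3], dtype=tf.int64)
dense_shape = tf.constant([4], dtype=tf.int64)

counts = tf.raw_ops.SparseBincount(
    indices=indices, values=values, dense_shape=dense_shape,
    size=tf.constant(5, dtype=tf.int64),
    weights=tf.constant([], dtype=tf.int64),  # length-0 => every occurrence counts as 1
    binary_output=False)
print(counts.numpy())  # expected [0 2 0 1 0]: value 1 appears twice, value 3 once
```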
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseBincount", name, indices, values, dense_shape, size, + weights, "binary_output", binary_output) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_bincount_eager_fallback( + indices, values, dense_shape, size, weights, + binary_output=binary_output, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseBincount", indices=indices, values=values, + dense_shape=dense_shape, size=size, weights=weights, + binary_output=binary_output, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tidx", _op._get_attr_type("Tidx"), "T", + _op._get_attr_type("T"), "binary_output", + _op._get_attr_bool("binary_output")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseBincount = tf_export("raw_ops.SparseBincount")(_ops.to_raw_op(sparse_bincount)) + + +def sparse_bincount_eager_fallback(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseBincount_Tidx], dense_shape: Annotated[Any, _atypes.Int64], size: Annotated[Any, TV_SparseBincount_Tidx], weights: Annotated[Any, TV_SparseBincount_T], binary_output: bool, name, ctx) -> Annotated[Any, TV_SparseBincount_T]: + if binary_output is None: + binary_output = False + binary_output = _execute.make_bool(binary_output, "binary_output") + _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([values, size], ctx, [_dtypes.int32, _dtypes.int64, ]) + (values, size) = _inputs_Tidx + _attr_T, (weights,) = _execute.args_to_matching_eager([weights], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.float32, _dtypes.float64, ]) + indices = _ops.convert_to_tensor(indices, _dtypes.int64) + dense_shape = _ops.convert_to_tensor(dense_shape, _dtypes.int64) + _inputs_flat = [indices, values, dense_shape, size, weights] + _attrs = ("Tidx", _attr_Tidx, "T", _attr_T, "binary_output", binary_output) + _result = _execute.execute(b"SparseBincount", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseBincount", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatMul_Ta = TypeVar("TV_SparseMatMul_Ta", _atypes.BFloat16, _atypes.Float32) +TV_SparseMatMul_Tb = TypeVar("TV_SparseMatMul_Tb", _atypes.BFloat16, _atypes.Float32) + +def sparse_mat_mul(a: Annotated[Any, TV_SparseMatMul_Ta], b: Annotated[Any, TV_SparseMatMul_Tb], transpose_a:bool=False, transpose_b:bool=False, a_is_sparse:bool=False, b_is_sparse:bool=False, name=None) -> Annotated[Any, _atypes.Float32]: + r"""Multiply matrix "a" by matrix "b". + + The inputs must be two-dimensional matrices and the inner dimension of "a" must + match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not + `SparseTensor`s. This op is optimized for the case where at least one of "a" or + "b" is sparse, in the sense that they have a large proportion of zero values. 
+ The breakeven for using this versus a dense matrix multiply on one platform was + 30% zero values in the sparse matrix. + + The gradient computation of this operation will only take advantage of sparsity + in the input gradient when that gradient comes from a Relu. + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`. + b: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`. + transpose_a: An optional `bool`. Defaults to `False`. + transpose_b: An optional `bool`. Defaults to `False`. + a_is_sparse: An optional `bool`. Defaults to `False`. + b_is_sparse: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatMul", name, a, b, "transpose_a", transpose_a, + "transpose_b", transpose_b, "a_is_sparse", a_is_sparse, "b_is_sparse", + b_is_sparse) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_mat_mul_eager_fallback( + a, b, transpose_a=transpose_a, transpose_b=transpose_b, + a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if a_is_sparse is None: + a_is_sparse = False + a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse") + if b_is_sparse is None: + b_is_sparse = False + b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatMul", a=a, b=b, transpose_a=transpose_a, + transpose_b=transpose_b, a_is_sparse=a_is_sparse, + b_is_sparse=b_is_sparse, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", + _op._get_attr_bool("transpose_b"), "a_is_sparse", + _op._get_attr_bool("a_is_sparse"), "b_is_sparse", + _op._get_attr_bool("b_is_sparse"), "Ta", + _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatMul = tf_export("raw_ops.SparseMatMul")(_ops.to_raw_op(sparse_mat_mul)) + + +def sparse_mat_mul_eager_fallback(a: Annotated[Any, TV_SparseMatMul_Ta], b: Annotated[Any, TV_SparseMatMul_Tb], transpose_a: bool, transpose_b: bool, a_is_sparse: bool, b_is_sparse: bool, name, ctx) -> Annotated[Any, _atypes.Float32]: + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if a_is_sparse is None: + a_is_sparse = False + a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse") + if b_is_sparse is None: + b_is_sparse = False + b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse") + _attr_Ta, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.float32, _dtypes.bfloat16, ], _dtypes.float32) + _attr_Tb, (b,) = 
_execute.args_to_matching_eager([b], ctx, [_dtypes.float32, _dtypes.bfloat16, ], _dtypes.float32) + _inputs_flat = [a, b] + _attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, + "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse, "Ta", _attr_Ta, + "Tb", _attr_Tb) + _result = _execute.execute(b"SparseMatMul", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentMean_T = TypeVar("TV_SparseSegmentMean_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentMean_Tidx = TypeVar("TV_SparseSegmentMean_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentMean_Tsegmentids = TypeVar("TV_SparseSegmentMean_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_mean(data: Annotated[Any, TV_SparseSegmentMean_T], indices: Annotated[Any, TV_SparseSegmentMean_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMean_Tsegmentids], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentMean_T]: + r"""Computes the mean along sparse segments of a tensor. + + See `tf.sparse.segment_sum` for usage examples. + + Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + dimension, selecting a subset of dimension 0, specified by `indices`. + + Args: + data: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentMean", name, data, indices, segment_ids, + "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_mean_eager_fallback( + data, indices, segment_ids, sparse_gradient=sparse_gradient, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
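A minimal usage sketch for the `SparseMatMul` op described above, a matmul over ordinary dense `Tensor`s where one operand has a large proportion of zeros, via the raw-op export; the inputs here are my own toy values.

```python
import tensorflow as tf

a = tf.constant([[1., 0., 0.], [0., 2., 0.]])   # mostly zeros
b = tf.constant([[1., 1.], [1., 1.], [1., 1.]])

out = tf.raw_ops.SparseMatMul(a=a, b=b, a_is_sparse=True)
print(out.numpy())  # [[1. 1.] [2. 2.]], the same result as tf.matmul(a, b)
```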
+ if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentMean", data=data, indices=indices, + segment_ids=segment_ids, + sparse_gradient=sparse_gradient, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentMean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentMean = tf_export("raw_ops.SparseSegmentMean")(_ops.to_raw_op(sparse_segment_mean)) + + +def sparse_segment_mean_eager_fallback(data: Annotated[Any, TV_SparseSegmentMean_T], indices: Annotated[Any, TV_SparseSegmentMean_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMean_Tsegmentids], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentMean_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids, "sparse_gradient", sparse_gradient) + _result = _execute.execute(b"SparseSegmentMean", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentMean", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentMeanGrad_T = TypeVar("TV_SparseSegmentMeanGrad_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentMeanGrad_Tidx = TypeVar("TV_SparseSegmentMeanGrad_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentMeanGrad_Tsegmentids = TypeVar("TV_SparseSegmentMeanGrad_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_mean_grad(grad: Annotated[Any, TV_SparseSegmentMeanGrad_T], indices: Annotated[Any, TV_SparseSegmentMeanGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_SparseSegmentMeanGrad_T]: + r"""Computes gradients for SparseSegmentMean. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0. + + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentMean op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentMean op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentMean op. + output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentMean op. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `grad`. 
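A small sketch of the `indices`/`segment_ids` pairing used by the `SparseSegmentMean` wrapper above, written against the public `tf.sparse.segment_mean`, which I am assuming maps onto this kernel.

```python
import tensorflow as tf

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
# Select rows 0 and 2 of `data` and average both into segment 0.
out = tf.sparse.segment_mean(data,
                             indices=tf.constant([0, 2]),
                             segment_ids=tf.constant([0, 0]))
print(out.numpy())  # [[3. 4.]], the mean of rows 0 and 2
```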
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentMeanGrad", name, grad, indices, segment_ids, + output_dim0) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_mean_grad_eager_fallback( + grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentMeanGrad", grad=grad, indices=indices, + segment_ids=segment_ids, + output_dim0=output_dim0, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentMeanGrad = tf_export("raw_ops.SparseSegmentMeanGrad")(_ops.to_raw_op(sparse_segment_mean_grad)) + + +def sparse_segment_mean_grad_eager_fallback(grad: Annotated[Any, TV_SparseSegmentMeanGrad_T], indices: Annotated[Any, TV_SparseSegmentMeanGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_SparseSegmentMeanGrad_T]: + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentMeanGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_SparseSegmentMeanGradV2Output = collections.namedtuple( + "SparseSegmentMeanGradV2", + ["output", "sorted_unique_indices"]) + + +TV_SparseSegmentMeanGradV2_T = TypeVar("TV_SparseSegmentMeanGradV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentMeanGradV2_Tidx = TypeVar("TV_SparseSegmentMeanGradV2_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentMeanGradV2_Tsegmentids = TypeVar("TV_SparseSegmentMeanGradV2_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_mean_grad_v2(grad: Annotated[Any, TV_SparseSegmentMeanGradV2_T], indices: Annotated[Any, TV_SparseSegmentMeanGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name=None): + r"""Computes gradients for SparseSegmentMean. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is the number of unique indexes in "indices". Also returns vector + "sorted_unique_indices" containing the corresponding indexes from "indices". 
+ + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentMean op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentMean op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentMean op. + dense_output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentMean op. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, sorted_unique_indices). + + output: A `Tensor`. Has the same type as `grad`. + sorted_unique_indices: A `Tensor`. Has the same type as `indices`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentMeanGradV2", name, grad, indices, segment_ids, + dense_output_dim0) + _result = _SparseSegmentMeanGradV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_mean_grad_v2_eager_fallback( + grad, indices, segment_ids, dense_output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentMeanGradV2", grad=grad, indices=indices, + segment_ids=segment_ids, + dense_output_dim0=dense_output_dim0, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentMeanGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentMeanGradV2Output._make(_result) + return _result + +SparseSegmentMeanGradV2 = tf_export("raw_ops.SparseSegmentMeanGradV2")(_ops.to_raw_op(sparse_segment_mean_grad_v2)) + + +def sparse_segment_mean_grad_v2_eager_fallback(grad: Annotated[Any, TV_SparseSegmentMeanGradV2_T], indices: Annotated[Any, TV_SparseSegmentMeanGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name, ctx): + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + dense_output_dim0 = _ops.convert_to_tensor(dense_output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, dense_output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentMeanGradV2", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentMeanGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentMeanGradV2Output._make(_result) + return _result + + +TV_SparseSegmentMeanWithNumSegments_T = TypeVar("TV_SparseSegmentMeanWithNumSegments_T", _atypes.BFloat16, _atypes.Float32, 
_atypes.Float64, _atypes.Half) +TV_SparseSegmentMeanWithNumSegments_Tidx = TypeVar("TV_SparseSegmentMeanWithNumSegments_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentMeanWithNumSegments_Tnumsegments = TypeVar("TV_SparseSegmentMeanWithNumSegments_Tnumsegments", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentMeanWithNumSegments_Tsegmentids = TypeVar("TV_SparseSegmentMeanWithNumSegments_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_mean_with_num_segments(data: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tnumsegments], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentMeanWithNumSegments_T]: + r"""Computes the mean along sparse segments of a tensor. + + Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + missing, the `output` tensor at that position will be zeroed. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Args: + data: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Should equal the number of distinct segment IDs. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentMeanWithNumSegments", name, data, indices, + segment_ids, num_segments, "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_mean_with_num_segments_eager_fallback( + data, indices, segment_ids, num_segments, + sparse_gradient=sparse_gradient, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
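The "missing ids are zeroed" behaviour described for `SparseSegmentMeanWithNumSegments` above can be observed through the public wrapper's `num_segments` argument, assuming (my assumption) that it dispatches to this kernel.

```python
import tensorflow as tf

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
out = tf.sparse.segment_mean(data,
                             indices=tf.constant([0, 2]),
                             segment_ids=tf.constant([0, 2]),  # segment 1 has no entries
                             num_segments=3)
print(out.numpy())  # [[1. 2.] [0. 0.] [5. 6.]]: the empty segment comes back as zeros
```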
+ if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentMeanWithNumSegments", data=data, indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tnumsegments", + _op._get_attr_type("Tnumsegments"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentMeanWithNumSegments = tf_export("raw_ops.SparseSegmentMeanWithNumSegments")(_ops.to_raw_op(sparse_segment_mean_with_num_segments)) + + +def sparse_segment_mean_with_num_segments_eager_fallback(data: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentMeanWithNumSegments_Tnumsegments], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentMeanWithNumSegments_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", + _attr_Tnumsegments, "Tsegmentids", _attr_Tsegmentids, "sparse_gradient", + sparse_gradient) + _result = _execute.execute(b"SparseSegmentMeanWithNumSegments", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentSqrtN_T = TypeVar("TV_SparseSegmentSqrtN_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSqrtN_Tidx = TypeVar("TV_SparseSegmentSqrtN_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSqrtN_Tsegmentids = TypeVar("TV_SparseSegmentSqrtN_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sqrt_n(data: Annotated[Any, TV_SparseSegmentSqrtN_T], indices: Annotated[Any, TV_SparseSegmentSqrtN_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtN_Tsegmentids], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentSqrtN_T]: + r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N. + + N is the size of the segment being reduced. + + See `tf.sparse.segment_sum` for usage examples. + + Args: + data: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + indices: A `Tensor`. 
Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSqrtN", name, data, indices, segment_ids, + "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sqrt_n_eager_fallback( + data, indices, segment_ids, sparse_gradient=sparse_gradient, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSqrtN", data=data, indices=indices, + segment_ids=segment_ids, + sparse_gradient=sparse_gradient, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSqrtN", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSqrtN = tf_export("raw_ops.SparseSegmentSqrtN")(_ops.to_raw_op(sparse_segment_sqrt_n)) + + +def sparse_segment_sqrt_n_eager_fallback(data: Annotated[Any, TV_SparseSegmentSqrtN_T], indices: Annotated[Any, TV_SparseSegmentSqrtN_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtN_Tsegmentids], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentSqrtN_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids, "sparse_gradient", sparse_gradient) + _result = _execute.execute(b"SparseSegmentSqrtN", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSqrtN", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentSqrtNGrad_T = TypeVar("TV_SparseSegmentSqrtNGrad_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSqrtNGrad_Tidx = TypeVar("TV_SparseSegmentSqrtNGrad_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSqrtNGrad_Tsegmentids = TypeVar("TV_SparseSegmentSqrtNGrad_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sqrt_n_grad(grad: 
Annotated[Any, TV_SparseSegmentSqrtNGrad_T], indices: Annotated[Any, TV_SparseSegmentSqrtNGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_SparseSegmentSqrtNGrad_T]: + r"""Computes gradients for SparseSegmentSqrtN. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0. + + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentSqrtN op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentSqrtN op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentSqrtN op. + output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentSqrtN op. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `grad`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSqrtNGrad", name, grad, indices, segment_ids, + output_dim0) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sqrt_n_grad_eager_fallback( + grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSqrtNGrad", grad=grad, indices=indices, + segment_ids=segment_ids, + output_dim0=output_dim0, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSqrtNGrad = tf_export("raw_ops.SparseSegmentSqrtNGrad")(_ops.to_raw_op(sparse_segment_sqrt_n_grad)) + + +def sparse_segment_sqrt_n_grad_eager_fallback(grad: Annotated[Any, TV_SparseSegmentSqrtNGrad_T], indices: Annotated[Any, TV_SparseSegmentSqrtNGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_SparseSegmentSqrtNGrad_T]: + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentSqrtNGrad", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result) + _result, = _result + return 
_result + +_SparseSegmentSqrtNGradV2Output = collections.namedtuple( + "SparseSegmentSqrtNGradV2", + ["output", "sorted_unique_indices"]) + + +TV_SparseSegmentSqrtNGradV2_T = TypeVar("TV_SparseSegmentSqrtNGradV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSqrtNGradV2_Tidx = TypeVar("TV_SparseSegmentSqrtNGradV2_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSqrtNGradV2_Tsegmentids = TypeVar("TV_SparseSegmentSqrtNGradV2_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sqrt_n_grad_v2(grad: Annotated[Any, TV_SparseSegmentSqrtNGradV2_T], indices: Annotated[Any, TV_SparseSegmentSqrtNGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name=None): + r"""Computes gradients for SparseSegmentSqrtN. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is the number of unique indexes in "indices". Also returns vector + "sorted_unique_indices" containing the corresponding indexes from "indices". + + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentSqrtN op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentSqrtN op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentSqrtN op. + dense_output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentSqrtN op. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, sorted_unique_indices). + + output: A `Tensor`. Has the same type as `grad`. + sorted_unique_indices: A `Tensor`. Has the same type as `indices`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSqrtNGradV2", name, grad, indices, segment_ids, + dense_output_dim0) + _result = _SparseSegmentSqrtNGradV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sqrt_n_grad_v2_eager_fallback( + grad, indices, segment_ids, dense_output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
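+  # Worked sketch of the V2 output shape (values assumed): for indices [3, 1, 3]
+  # the op returns sorted_unique_indices = [1, 3], and "output" has 2 rows
+  # (one per unique index) rather than output_dim0 rows as in the non-V2 grad op.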
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSqrtNGradV2", grad=grad, indices=indices, + segment_ids=segment_ids, + dense_output_dim0=dense_output_dim0, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSqrtNGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentSqrtNGradV2Output._make(_result) + return _result + +SparseSegmentSqrtNGradV2 = tf_export("raw_ops.SparseSegmentSqrtNGradV2")(_ops.to_raw_op(sparse_segment_sqrt_n_grad_v2)) + + +def sparse_segment_sqrt_n_grad_v2_eager_fallback(grad: Annotated[Any, TV_SparseSegmentSqrtNGradV2_T], indices: Annotated[Any, TV_SparseSegmentSqrtNGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name, ctx): + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + dense_output_dim0 = _ops.convert_to_tensor(dense_output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, dense_output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentSqrtNGradV2", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSqrtNGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentSqrtNGradV2Output._make(_result) + return _result + + +TV_SparseSegmentSqrtNWithNumSegments_T = TypeVar("TV_SparseSegmentSqrtNWithNumSegments_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSqrtNWithNumSegments_Tidx = TypeVar("TV_SparseSegmentSqrtNWithNumSegments_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSqrtNWithNumSegments_Tnumsegments = TypeVar("TV_SparseSegmentSqrtNWithNumSegments_Tnumsegments", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSqrtNWithNumSegments_Tsegmentids = TypeVar("TV_SparseSegmentSqrtNWithNumSegments_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sqrt_n_with_num_segments(data: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tnumsegments], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_T]: + r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N. + + N is the size of the segment being reduced. + + Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is + missing, the `output` tensor at that position will be zeroed. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Args: + data: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + indices: A `Tensor`. 
Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Should equal the number of distinct segment IDs. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSqrtNWithNumSegments", name, data, indices, + segment_ids, num_segments, "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sqrt_n_with_num_segments_eager_fallback( + data, indices, segment_ids, num_segments, + sparse_gradient=sparse_gradient, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSqrtNWithNumSegments", data=data, indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tnumsegments", + _op._get_attr_type("Tnumsegments"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSqrtNWithNumSegments = tf_export("raw_ops.SparseSegmentSqrtNWithNumSegments")(_ops.to_raw_op(sparse_segment_sqrt_n_with_num_segments)) + + +def sparse_segment_sqrt_n_with_num_segments_eager_fallback(data: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_Tnumsegments], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentSqrtNWithNumSegments_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", + _attr_Tnumsegments, "Tsegmentids", _attr_Tsegmentids, 
"sparse_gradient", + sparse_gradient) + _result = _execute.execute(b"SparseSegmentSqrtNWithNumSegments", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentSum_T = TypeVar("TV_SparseSegmentSum_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseSegmentSum_Tidx = TypeVar("TV_SparseSegmentSum_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSum_Tsegmentids = TypeVar("TV_SparseSegmentSum_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sum(data: Annotated[Any, TV_SparseSegmentSum_T], indices: Annotated[Any, TV_SparseSegmentSum_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSum_Tsegmentids], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentSum_T]: + r"""Computes the sum along sparse segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + dimension, selecting a subset of dimension 0, specified by `indices`. + + For example: + + ```python + c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + + # Select two rows, one segment. + tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + # => [[0 0 0 0]] + + # Select two rows, two segment. + tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + # => [[ 1 2 3 4] + # [-1 -2 -3 -4]] + + # Select all rows, two segments. + tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + # => [[0 0 0 0] + # [5 6 7 8]] + + # Which is equivalent to: + tf.segment_sum(c, tf.constant([0, 0, 1])) + ``` + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSum", name, data, indices, segment_ids, + "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sum_eager_fallback( + data, indices, segment_ids, sparse_gradient=sparse_gradient, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSum", data=data, indices=indices, + segment_ids=segment_ids, + sparse_gradient=sparse_gradient, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSum = tf_export("raw_ops.SparseSegmentSum")(_ops.to_raw_op(sparse_segment_sum)) + + +def sparse_segment_sum_eager_fallback(data: Annotated[Any, TV_SparseSegmentSum_T], indices: Annotated[Any, TV_SparseSegmentSum_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSum_Tsegmentids], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentSum_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids, "sparse_gradient", sparse_gradient) + _result = _execute.execute(b"SparseSegmentSum", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseSegmentSumGrad_T = TypeVar("TV_SparseSegmentSumGrad_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSumGrad_Tidx = TypeVar("TV_SparseSegmentSumGrad_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSumGrad_Tsegmentids = TypeVar("TV_SparseSegmentSumGrad_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sum_grad(grad: Annotated[Any, TV_SparseSegmentSumGrad_T], indices: Annotated[Any, TV_SparseSegmentSumGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_SparseSegmentSumGrad_T]: + r"""Computes gradients for SparseSegmentSum. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0. + + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentSum op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentSum op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentSum op. + output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentSum op. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `grad`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSumGrad", name, grad, indices, segment_ids, + output_dim0) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sum_grad_eager_fallback( + grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSumGrad", grad=grad, indices=indices, + segment_ids=segment_ids, + output_dim0=output_dim0, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSumGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSumGrad = tf_export("raw_ops.SparseSegmentSumGrad")(_ops.to_raw_op(sparse_segment_sum_grad)) + + +def sparse_segment_sum_grad_eager_fallback(grad: Annotated[Any, TV_SparseSegmentSumGrad_T], indices: Annotated[Any, TV_SparseSegmentSumGrad_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumGrad_Tsegmentids], output_dim0: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_SparseSegmentSumGrad_T]: + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentSumGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSumGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_SparseSegmentSumGradV2Output = collections.namedtuple( + "SparseSegmentSumGradV2", + ["output", "sorted_unique_indices"]) + + +TV_SparseSegmentSumGradV2_T = TypeVar("TV_SparseSegmentSumGradV2_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_SparseSegmentSumGradV2_Tidx = TypeVar("TV_SparseSegmentSumGradV2_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSumGradV2_Tsegmentids = TypeVar("TV_SparseSegmentSumGradV2_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sum_grad_v2(grad: Annotated[Any, TV_SparseSegmentSumGradV2_T], indices: Annotated[Any, TV_SparseSegmentSumGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name=None): + r"""Computes gradients for SparseSegmentSum. + + Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is the number of unique indexes in "indices". 
Also returns vector + "sorted_unique_indices" containing the corresponding indexes from "indices". + + Args: + grad: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + gradient propagated to the SparseSegmentSum op. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + indices passed to the corresponding SparseSegmentSum op. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + segment_ids passed to the corresponding SparseSegmentSum op. + dense_output_dim0: A `Tensor` of type `int32`. + dimension 0 of "data" passed to SparseSegmentSum op. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (output, sorted_unique_indices). + + output: A `Tensor`. Has the same type as `grad`. + sorted_unique_indices: A `Tensor`. Has the same type as `indices`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSumGradV2", name, grad, indices, segment_ids, + dense_output_dim0) + _result = _SparseSegmentSumGradV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sum_grad_v2_eager_fallback( + grad, indices, segment_ids, dense_output_dim0, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSumGradV2", grad=grad, indices=indices, + segment_ids=segment_ids, + dense_output_dim0=dense_output_dim0, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tsegmentids", + _op._get_attr_type("Tsegmentids")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSumGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentSumGradV2Output._make(_result) + return _result + +SparseSegmentSumGradV2 = tf_export("raw_ops.SparseSegmentSumGradV2")(_ops.to_raw_op(sparse_segment_sum_grad_v2)) + + +def sparse_segment_sum_grad_v2_eager_fallback(grad: Annotated[Any, TV_SparseSegmentSumGradV2_T], indices: Annotated[Any, TV_SparseSegmentSumGradV2_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumGradV2_Tsegmentids], dense_output_dim0: Annotated[Any, _atypes.Int32], name, ctx): + _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + dense_output_dim0 = _ops.convert_to_tensor(dense_output_dim0, _dtypes.int32) + _inputs_flat = [grad, indices, segment_ids, dense_output_dim0] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tsegmentids", + _attr_Tsegmentids) + _result = _execute.execute(b"SparseSegmentSumGradV2", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSumGradV2", _inputs_flat, _attrs, _result) + _result = _SparseSegmentSumGradV2Output._make(_result) + return _result + + +TV_SparseSegmentSumWithNumSegments_T = 
TypeVar("TV_SparseSegmentSumWithNumSegments_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_SparseSegmentSumWithNumSegments_Tidx = TypeVar("TV_SparseSegmentSumWithNumSegments_Tidx", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSumWithNumSegments_Tnumsegments = TypeVar("TV_SparseSegmentSumWithNumSegments_Tnumsegments", _atypes.Int32, _atypes.Int64) +TV_SparseSegmentSumWithNumSegments_Tsegmentids = TypeVar("TV_SparseSegmentSumWithNumSegments_Tsegmentids", _atypes.Int32, _atypes.Int64) + +def sparse_segment_sum_with_num_segments(data: Annotated[Any, TV_SparseSegmentSumWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tnumsegments], sparse_gradient:bool=False, name=None) -> Annotated[Any, TV_SparseSegmentSumWithNumSegments_T]: + r"""Computes the sum along sparse segments of a tensor. + + Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + missing, the `output` tensor at that position will be zeroed. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + for an explanation of segments. + + For example: + + ```python + c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + + tf.sparse_segment_sum_with_num_segments( + c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + # => [[0 0 0 0] + # [0 0 0 0] + # [0 0 0 0]] + + tf.sparse_segment_sum_with_num_segments(c, + tf.constant([0, 1]), + tf.constant([0, 2], + num_segments=4)) + # => [[ 1 2 3 4] + # [ 0 0 0 0] + # [-1 -2 -3 -4] + # [ 0 0 0 0]] + ``` + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Has same rank as `segment_ids`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A 1-D tensor. Values should be sorted and can be repeated. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Should equal the number of distinct segment IDs. + sparse_gradient: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseSegmentSumWithNumSegments", name, data, indices, + segment_ids, num_segments, "sparse_gradient", sparse_gradient) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_segment_sum_with_num_segments_eager_fallback( + data, indices, segment_ids, num_segments, + sparse_gradient=sparse_gradient, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseSegmentSumWithNumSegments", data=data, indices=indices, + segment_ids=segment_ids, + num_segments=num_segments, + sparse_gradient=sparse_gradient, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tidx", + _op._get_attr_type("Tidx"), "Tnumsegments", + _op._get_attr_type("Tnumsegments"), "Tsegmentids", + _op._get_attr_type("Tsegmentids"), "sparse_gradient", + _op._get_attr_bool("sparse_gradient")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseSegmentSumWithNumSegments = tf_export("raw_ops.SparseSegmentSumWithNumSegments")(_ops.to_raw_op(sparse_segment_sum_with_num_segments)) + + +def sparse_segment_sum_with_num_segments_eager_fallback(data: Annotated[Any, TV_SparseSegmentSumWithNumSegments_T], indices: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tidx], segment_ids: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tsegmentids], num_segments: Annotated[Any, TV_SparseSegmentSumWithNumSegments_Tnumsegments], sparse_gradient: bool, name, ctx) -> Annotated[Any, TV_SparseSegmentSumWithNumSegments_T]: + if sparse_gradient is None: + sparse_gradient = False + sparse_gradient = _execute.make_bool(sparse_gradient, "sparse_gradient") + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tsegmentids, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, indices, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", + _attr_Tnumsegments, "Tsegmentids", _attr_Tsegmentids, "sparse_gradient", + sparse_gradient) + _result = _execute.execute(b"SparseSegmentSumWithNumSegments", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sqrt_T = TypeVar("TV_Sqrt_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def sqrt(x: Annotated[Any, TV_Sqrt_T], name=None) -> Annotated[Any, TV_Sqrt_T]: + r"""Computes square root of x element-wise. + + I.e., \\(y = \sqrt{x} = x^{1/2}\\). + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
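+
+  For example (an illustrative sketch; the input values are assumed):
+
+  ```python
+  tf.math.sqrt(tf.constant([4.0, 9.0, 16.0]))
+  # => [2. 3. 4.]
+  ```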
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sqrt", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sqrt_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sqrt", x=x, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sqrt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sqrt = tf_export("raw_ops.Sqrt")(_ops.to_raw_op(sqrt)) + + +def sqrt_eager_fallback(x: Annotated[Any, TV_Sqrt_T], name, ctx) -> Annotated[Any, TV_Sqrt_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sqrt", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sqrt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SqrtGrad_T = TypeVar("TV_SqrtGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def sqrt_grad(y: Annotated[Any, TV_SqrtGrad_T], dy: Annotated[Any, TV_SqrtGrad_T], name=None) -> Annotated[Any, TV_SqrtGrad_T]: + r"""Computes the gradient for the sqrt of `x` wrt its input. + + Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` + is the corresponding input gradient. + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SqrtGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sqrt_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SqrtGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SqrtGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SqrtGrad = tf_export("raw_ops.SqrtGrad")(_ops.to_raw_op(sqrt_grad)) + + +def sqrt_grad_eager_fallback(y: Annotated[Any, TV_SqrtGrad_T], dy: Annotated[Any, TV_SqrtGrad_T], name, ctx) -> Annotated[Any, TV_SqrtGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SqrtGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SqrtGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Square_T = TypeVar("TV_Square_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.square', 'square') +def square(x: Annotated[Any, TV_Square_T], name=None) -> Annotated[Any, TV_Square_T]: + r"""Computes square of x element-wise. + + I.e., \\(y = x * x = x^2\\). + + >>> tf.math.square([-2., 0., 3.]) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Square", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_square( + (x, name,), None) + if _result is not NotImplemented: + return _result + return square_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + square, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_square( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
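+    # Illustrative result for the docstring example above (values assumed):
+    #   tf.math.square([-2., 0., 3.])  # => [4. 0. 9.]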
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Square", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + square, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Square", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Square = tf_export("raw_ops.Square")(_ops.to_raw_op(square)) +_dispatcher_for_square = square._tf_type_based_dispatcher.Dispatch + + +def square_eager_fallback(x: Annotated[Any, TV_Square_T], name, ctx) -> Annotated[Any, TV_Square_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Square", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Square", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SquaredDifference_T = TypeVar("TV_SquaredDifference_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.squared_difference', v1=['math.squared_difference', 'squared_difference']) +@deprecated_endpoints('squared_difference') +def squared_difference(x: Annotated[Any, TV_SquaredDifference_T], y: Annotated[Any, TV_SquaredDifference_T], name=None) -> Annotated[Any, TV_SquaredDifference_T]: + r"""Returns conj(x - y)(x - y) element-wise. + + *NOTE*: `math.squared_difference` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SquaredDifference", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_squared_difference( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return squared_difference_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + squared_difference, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_squared_difference( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
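+    # Illustrative sketch (values assumed): for real inputs this computes
+    # (x - y) ** 2 element-wise, e.g.
+    #   tf.math.squared_difference([2., 4.], [1., 1.])  # => [1. 9.]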
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SquaredDifference", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + squared_difference, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SquaredDifference", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SquaredDifference = tf_export("raw_ops.SquaredDifference")(_ops.to_raw_op(squared_difference)) +_dispatcher_for_squared_difference = squared_difference._tf_type_based_dispatcher.Dispatch + + +def squared_difference_eager_fallback(x: Annotated[Any, TV_SquaredDifference_T], y: Annotated[Any, TV_SquaredDifference_T], name, ctx) -> Annotated[Any, TV_SquaredDifference_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SquaredDifference", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SquaredDifference", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sub_T = TypeVar("TV_Sub_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def sub(x: Annotated[Any, TV_Sub_T], y: Annotated[Any, TV_Sub_T], name=None) -> Annotated[Any, TV_Sub_T]: + r"""Returns x - y element-wise. + + *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Both input and output have a range `(-inf, inf)`. + + Example usages below. + + Subtract operation between an array and a scalar: + + >>> x = [1, 2, 3, 4, 5] + >>> y = 1 + >>> tf.subtract(x, y) + + >>> tf.subtract(y, x) + + + Note that binary `-` operator can be used instead: + + >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5]) + >>> y = tf.convert_to_tensor(1) + >>> x - y + + + Subtract operation between an array and a tensor of same shape: + + >>> x = [1, 2, 3, 4, 5] + >>> y = tf.constant([5, 4, 3, 2, 1]) + >>> tf.subtract(y, x) + + + **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a + non-tensor, the non-tensor input will adopt (or get casted to) the data type + of the tensor input. This can potentially cause unwanted overflow or underflow + conversion. + + For example, + + >>> x = tf.constant([1, 2], dtype=tf.int8) + >>> y = [2**8 + 1, 2**8 + 2] + >>> tf.subtract(x, y) + + + When subtracting two input values of different shapes, `tf.subtract` follows the + [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules) + . The two input array shapes are compared element-wise. Starting with the + trailing dimensions, the two dimensions either have to be equal or one of them + needs to be `1`. 
+ + For example, + + >>> x = np.ones(6).reshape(2, 3, 1) + >>> y = np.ones(6).reshape(2, 1, 3) + >>> tf.subtract(x, y) + + + Example with inputs of different dimensions: + + >>> x = np.ones(6).reshape(2, 3, 1) + >>> y = np.ones(6).reshape(1, 6) + >>> tf.subtract(x, y) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sub", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sub_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sub", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sub", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sub = tf_export("raw_ops.Sub")(_ops.to_raw_op(sub)) + + +def sub_eager_fallback(x: Annotated[Any, TV_Sub_T], y: Annotated[Any, TV_Sub_T], name, ctx) -> Annotated[Any, TV_Sub_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, _dtypes.uint32, _dtypes.uint64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Sub", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sub", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Sum_T = TypeVar("TV_Sum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Sum_Tidx = TypeVar("TV_Sum_Tidx", _atypes.Int32, _atypes.Int64) + +def _sum(input: Annotated[Any, TV_Sum_T], axis: Annotated[Any, TV_Sum_Tidx], keep_dims:bool=False, name=None) -> Annotated[Any, TV_Sum_T]: + r"""Computes the sum of elements across dimensions of a tensor. + + Reduces `input` along the dimensions given in `axis`. Unless + `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + `axis`. If `keep_dims` is true, the reduced dimensions are + retained with length 1. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The tensor to reduce. + axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The dimensions to reduce. Must be in the range + `[-rank(input), rank(input))`. 
+ keep_dims: An optional `bool`. Defaults to `False`. + If true, retain reduced dimensions with length 1. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Sum", name, input, axis, "keep_dims", keep_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return _sum_eager_fallback( + input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Sum", input=input, reduction_indices=axis, keep_dims=keep_dims, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T", + _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Sum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Sum = tf_export("raw_ops.Sum")(_ops.to_raw_op(_sum)) + + +def _sum_eager_fallback(input: Annotated[Any, TV_Sum_T], axis: Annotated[Any, TV_Sum_Tidx], keep_dims: bool, name, ctx) -> Annotated[Any, TV_Sum_T]: + if keep_dims is None: + keep_dims = False + keep_dims = _execute.make_bool(keep_dims, "keep_dims") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [input, axis] + _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) + _result = _execute.execute(b"Sum", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Sum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Tan_T = TypeVar("TV_Tan_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.tan', 'tan') +def tan(x: Annotated[Any, TV_Tan_T], name=None) -> Annotated[Any, TV_Tan_T]: + r"""Computes tan of x element-wise. + + Given an input tensor, this function computes tangent of every + element in the tensor. Input range is `(-inf, inf)` and + output range is `(-inf, inf)`. If input lies outside the boundary, `nan` + is returned. + + ```python + x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] + ``` + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Tan", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_tan( + (x, name,), None) + if _result is not NotImplemented: + return _result + return tan_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + tan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_tan( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Tan", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + tan, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Tan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Tan = tf_export("raw_ops.Tan")(_ops.to_raw_op(tan)) +_dispatcher_for_tan = tan._tf_type_based_dispatcher.Dispatch + + +def tan_eager_fallback(x: Annotated[Any, TV_Tan_T], name, ctx) -> Annotated[Any, TV_Tan_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Tan", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Tan", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Tanh_T = TypeVar("TV_Tanh_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.tanh', 'nn.tanh', 'tanh') +def tanh(x: Annotated[Any, TV_Tanh_T], name=None) -> Annotated[Any, TV_Tanh_T]: + r"""Computes hyperbolic tangent of `x` element-wise. + + Given an input tensor, this function computes hyperbolic tangent of every + element in the tensor. Input range is `[-inf, inf]` and + output range is `[-1,1]`. + + >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + >>> tf.math.tanh(x) + + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Tanh", name, x) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_tanh( + (x, name,), None) + if _result is not NotImplemented: + return _result + return tanh_eager_fallback( + x, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + tanh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_tanh( + (x, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Tanh", x=x, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + tanh, (), dict(x=x, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Tanh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Tanh = tf_export("raw_ops.Tanh")(_ops.to_raw_op(tanh)) +_dispatcher_for_tanh = tanh._tf_type_based_dispatcher.Dispatch + + +def tanh_eager_fallback(x: Annotated[Any, TV_Tanh_T], name, ctx) -> Annotated[Any, TV_Tanh_T]: + _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + _inputs_flat = [x] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Tanh", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Tanh", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TanhGrad_T = TypeVar("TV_TanhGrad_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def tanh_grad(y: Annotated[Any, TV_TanhGrad_T], dy: Annotated[Any, TV_TanhGrad_T], name=None) -> Annotated[Any, TV_TanhGrad_T]: + r"""Computes the gradient for the tanh of `x` wrt its input. + + Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` + is the corresponding input gradient. + + Args: + y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + dy: A `Tensor`. Must have the same type as `y`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `y`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TanhGrad", name, y, dy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tanh_grad_eager_fallback( + y, dy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TanhGrad", y=y, dy=dy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TanhGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TanhGrad = tf_export("raw_ops.TanhGrad")(_ops.to_raw_op(tanh_grad)) + + +def tanh_grad_eager_fallback(y: Annotated[Any, TV_TanhGrad_T], dy: Annotated[Any, TV_TanhGrad_T], name, ctx) -> Annotated[Any, TV_TanhGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (y, dy) = _inputs_T + _inputs_flat = [y, dy] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"TanhGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TanhGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TruncateDiv_T = TypeVar("TV_TruncateDiv_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('truncatediv') +def truncate_div(x: Annotated[Any, TV_TruncateDiv_T], y: Annotated[Any, TV_TruncateDiv_T], name=None) -> Annotated[Any, TV_TruncateDiv_T]: + r"""Returns x / y element-wise, rounded towards zero. + + Truncation designates that negative numbers will round fractional quantities + toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + than Python semantics. See `FloorDiv` for a division function that matches + Python Semantics. + + *NOTE*: `truncatediv` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TruncateDiv", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_truncate_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return truncate_div_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + truncate_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_truncate_div( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
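Since `TanhGrad` implements `grad = dy * (1 - y*y)` with `y = tanh(x)`, here is a small sketch that cross-checks the raw op against autodiff of the public `tf.math.tanh` endpoint (illustrative only):

```python
import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.tanh(x)
auto_grad = tape.gradient(y, x)                              # d tanh(x)/dx via autodiff
manual_grad = tf.raw_ops.TanhGrad(y=y, dy=tf.ones_like(y))   # dy * (1 - y*y)
print(auto_grad.numpy(), manual_grad.numpy())                # both equal 1 - tanh(x)**2
```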
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TruncateDiv", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + truncate_div, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TruncateDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TruncateDiv = tf_export("raw_ops.TruncateDiv")(_ops.to_raw_op(truncate_div)) +_dispatcher_for_truncate_div = truncate_div._tf_type_based_dispatcher.Dispatch + + +def truncate_div_eager_fallback(x: Annotated[Any, TV_TruncateDiv_T], y: Annotated[Any, TV_TruncateDiv_T], name, ctx) -> Annotated[Any, TV_TruncateDiv_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.uint8, _dtypes.int8, _dtypes.uint16, _dtypes.int16, _dtypes.int32, _dtypes.uint32, _dtypes.uint64, _dtypes.int64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"TruncateDiv", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TruncateDiv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TruncateMod_T = TypeVar("TV_TruncateMod_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('truncatemod') +def truncate_mod(x: Annotated[Any, TV_TruncateMod_T], y: Annotated[Any, TV_TruncateMod_T], name=None) -> Annotated[Any, TV_TruncateMod_T]: + r"""Returns element-wise remainder of division. This emulates C semantics in that + + the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + y + truncate_mod(x, y) = x`. + + *NOTE*: `truncatemod` supports broadcasting. More about broadcasting + [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + + Args: + x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TruncateMod", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_truncate_mod( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return truncate_mod_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + truncate_mod, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_truncate_mod( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
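A quick sketch of the C-style rounding described for `truncatediv`, contrasted with Python's floor division (values assumed, easy to verify by hand):

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
print(tf.truncatediv(x, y).numpy())  # [-1  1] -- fractional part rounded toward zero
print((x // y).numpy())              # [-2  1] -- Python/floor semantics (FloorDiv)
```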
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TruncateMod", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + truncate_mod, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TruncateMod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TruncateMod = tf_export("raw_ops.TruncateMod")(_ops.to_raw_op(truncate_mod)) +_dispatcher_for_truncate_mod = truncate_mod._tf_type_based_dispatcher.Dispatch + + +def truncate_mod_eager_fallback(x: Annotated[Any, TV_TruncateMod_T], y: Annotated[Any, TV_TruncateMod_T], name, ctx) -> Annotated[Any, TV_TruncateMod_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"TruncateMod", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TruncateMod", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnsortedSegmentMax_T = TypeVar("TV_UnsortedSegmentMax_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_UnsortedSegmentMax_Tindices = TypeVar("TV_UnsortedSegmentMax_Tindices", _atypes.Int32, _atypes.Int64) +TV_UnsortedSegmentMax_Tnumsegments = TypeVar("TV_UnsortedSegmentMax_Tnumsegments", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.unsorted_segment_max', v1=['math.unsorted_segment_max', 'unsorted_segment_max']) +@deprecated_endpoints('unsorted_segment_max') +def unsorted_segment_max(data: Annotated[Any, TV_UnsortedSegmentMax_T], segment_ids: Annotated[Any, TV_UnsortedSegmentMax_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentMax_Tnumsegments], name=None) -> Annotated[Any, TV_UnsortedSegmentMax_T]: + r"""Computes the maximum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + This operator is similar to `tf.math.unsorted_segment_sum`, + Instead of computing the sum over segments, it computes the maximum such that: + + \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such + that `segment_ids[j...] == i`. + + If the maximum is empty for a given segment ID `i`, it outputs the smallest + possible value for the specific numeric type, + `output[i] = numeric_limits::lowest()`. + + If the given segment ID `i` is negative, then the corresponding value is + dropped, and will not be included in the result. + + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + +
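Likewise for `truncatemod`: the remainder takes the sign of `x`, so the documented identity `truncatediv(x, y) * y + truncatemod(x, y) == x` holds. A minimal check:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
r = tf.truncatemod(x, y)                        # [-2  2]
print((tf.truncatediv(x, y) * y + r).numpy())   # [-7  7] == x
```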
+ +
+ + For example: + + >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + >>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + array([[4, 3, 3, 4], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor whose shape is a prefix of `data.shape`. + The values must be less than `num_segments`. + + Caution: The values are always validated to be in range on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnsortedSegmentMax", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unsorted_segment_max( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + return unsorted_segment_max_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_max, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unsorted_segment_max( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnsortedSegmentMax", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_max, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnsortedSegmentMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnsortedSegmentMax = tf_export("raw_ops.UnsortedSegmentMax")(_ops.to_raw_op(unsorted_segment_max)) +_dispatcher_for_unsorted_segment_max = unsorted_segment_max._tf_type_based_dispatcher.Dispatch + + +def unsorted_segment_max_eager_fallback(data: Annotated[Any, TV_UnsortedSegmentMax_T], segment_ids: Annotated[Any, TV_UnsortedSegmentMax_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentMax_Tnumsegments], name, ctx) -> Annotated[Any, TV_UnsortedSegmentMax_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"UnsortedSegmentMax", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnsortedSegmentMax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnsortedSegmentMin_T = TypeVar("TV_UnsortedSegmentMin_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_UnsortedSegmentMin_Tindices = TypeVar("TV_UnsortedSegmentMin_Tindices", _atypes.Int32, _atypes.Int64) +TV_UnsortedSegmentMin_Tnumsegments = TypeVar("TV_UnsortedSegmentMin_Tnumsegments", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.unsorted_segment_min', v1=['math.unsorted_segment_min', 'unsorted_segment_min']) +@deprecated_endpoints('unsorted_segment_min') +def unsorted_segment_min(data: Annotated[Any, TV_UnsortedSegmentMin_T], segment_ids: Annotated[Any, TV_UnsortedSegmentMin_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentMin_Tnumsegments], name=None) -> Annotated[Any, TV_UnsortedSegmentMin_T]: + r"""Computes the minimum along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. 
+ + This operator is similar to `tf.math.unsorted_segment_sum`, + Instead of computing the sum over segments, it computes the minimum such that: + + \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such + that `segment_ids[j...] == i`. + + If the minimum is empty for a given segment ID `i`, it outputs the largest + possible value for the specific numeric type, + `output[i] = numeric_limits::max()`. + + For example: + + >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + >>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + array([[1, 2, 2, 1], + [5, 6, 7, 8]], dtype=int32) + + If the given segment ID `i` is negative, then the corresponding value is + dropped, and will not be included in the result. + + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor whose shape is a prefix of `data.shape`. + The values must be less than `num_segments`. + + Caution: The values are always validated to be in range on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnsortedSegmentMin", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unsorted_segment_min( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + return unsorted_segment_min_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_min, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unsorted_segment_min( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnsortedSegmentMin", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_min, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnsortedSegmentMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnsortedSegmentMin = tf_export("raw_ops.UnsortedSegmentMin")(_ops.to_raw_op(unsorted_segment_min)) +_dispatcher_for_unsorted_segment_min = unsorted_segment_min._tf_type_based_dispatcher.Dispatch + + +def unsorted_segment_min_eager_fallback(data: Annotated[Any, TV_UnsortedSegmentMin_T], segment_ids: Annotated[Any, TV_UnsortedSegmentMin_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentMin_Tnumsegments], name, ctx) -> Annotated[Any, TV_UnsortedSegmentMin_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"UnsortedSegmentMin", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnsortedSegmentMin", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnsortedSegmentProd_T = TypeVar("TV_UnsortedSegmentProd_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_UnsortedSegmentProd_Tindices = TypeVar("TV_UnsortedSegmentProd_Tindices", _atypes.Int32, _atypes.Int64) +TV_UnsortedSegmentProd_Tnumsegments = TypeVar("TV_UnsortedSegmentProd_Tnumsegments", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.unsorted_segment_prod', v1=['math.unsorted_segment_prod', 'unsorted_segment_prod']) +@deprecated_endpoints('unsorted_segment_prod') +def unsorted_segment_prod(data: Annotated[Any, TV_UnsortedSegmentProd_T], segment_ids: Annotated[Any, TV_UnsortedSegmentProd_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentProd_Tnumsegments], name=None) -> Annotated[Any, TV_UnsortedSegmentProd_T]: + r"""Computes the product along segments of a tensor. + + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. 
+ + This operator is similar to `tf.math.unsorted_segment_sum`, + Instead of computing the sum over segments, it computes the product of all + entries belonging to a segment such that: + + \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples + `j...` such that `segment_ids[j...] == i`. + + For example: + + >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + >>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + array([[4, 6, 6, 4], + [5, 6, 7, 8]], dtype=int32) + + If there is no entry for a given segment ID `i`, it outputs 1. + + If the given segment ID `i` is negative, then the corresponding value is + dropped, and will not be included in the result. + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor whose shape is a prefix of `data.shape`. + The values must be less than `num_segments`. + + Caution: The values are always validated to be in range on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnsortedSegmentProd", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unsorted_segment_prod( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + return unsorted_segment_prod_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_prod, (), dict(data=data, + segment_ids=segment_ids, + num_segments=num_segments, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unsorted_segment_prod( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnsortedSegmentProd", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_prod, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnsortedSegmentProd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnsortedSegmentProd = tf_export("raw_ops.UnsortedSegmentProd")(_ops.to_raw_op(unsorted_segment_prod)) +_dispatcher_for_unsorted_segment_prod = unsorted_segment_prod._tf_type_based_dispatcher.Dispatch + + +def unsorted_segment_prod_eager_fallback(data: Annotated[Any, TV_UnsortedSegmentProd_T], segment_ids: Annotated[Any, TV_UnsortedSegmentProd_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentProd_Tnumsegments], name, ctx) -> Annotated[Any, TV_UnsortedSegmentProd_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"UnsortedSegmentProd", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnsortedSegmentProd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_UnsortedSegmentSum_T = TypeVar("TV_UnsortedSegmentSum_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_UnsortedSegmentSum_Tindices = TypeVar("TV_UnsortedSegmentSum_Tindices", _atypes.Int16, _atypes.Int32, _atypes.Int64) +TV_UnsortedSegmentSum_Tnumsegments = TypeVar("TV_UnsortedSegmentSum_Tnumsegments", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.unsorted_segment_sum', v1=['math.unsorted_segment_sum', 'unsorted_segment_sum']) +@deprecated_endpoints('unsorted_segment_sum') +def unsorted_segment_sum(data: Annotated[Any, TV_UnsortedSegmentSum_T], segment_ids: Annotated[Any, TV_UnsortedSegmentSum_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentSum_Tnumsegments], name=None) -> Annotated[Any, TV_UnsortedSegmentSum_T]: + r"""Computes the sum along segments of a tensor. 
+ + Read + [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + for an explanation of segments. + + Computes a tensor such that + \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such + that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + need not be sorted and need not cover all values in the full + range of valid values. + + If the sum is empty for a given segment ID `i`, `output[i] = 0`. + If the given segment ID `i` is negative, the value is dropped and will not be + added to the sum of the segment. + + `num_segments` should equal the number of distinct segment IDs. + + Caution: On CPU, values in `segment_ids` are always validated to be less than + `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + result in safe but unspecified behavior, which may include ignoring + out-of-bound indices or outputting a tensor with a 0 stored in the first + dimension of its shape if `num_segments` is 0. + +
+ +
+ + >>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]] + >>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy() + array([[5, 5, 5, 5], + [5, 6, 7, 8]], dtype=int32) + + Args: + data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + segment_ids: A `Tensor`. Must be one of the following types: `int16`, `int32`, `int64`. + A tensor whose shape is a prefix of `data.shape`. + The values must be less than `num_segments`. + + Caution: The values are always validated to be in range on CPU, never validated + on GPU. + num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `data`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnsortedSegmentSum", name, data, segment_ids, num_segments) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_unsorted_segment_sum( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + return unsorted_segment_sum_eager_fallback( + data, segment_ids, num_segments, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_sum, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_unsorted_segment_sum( + (data, segment_ids, num_segments, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnsortedSegmentSum", data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + unsorted_segment_sum, (), dict(data=data, segment_ids=segment_ids, + num_segments=num_segments, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "Tnumsegments", + _op._get_attr_type("Tnumsegments")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnsortedSegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnsortedSegmentSum = tf_export("raw_ops.UnsortedSegmentSum")(_ops.to_raw_op(unsorted_segment_sum)) +_dispatcher_for_unsorted_segment_sum = unsorted_segment_sum._tf_type_based_dispatcher.Dispatch + + +def unsorted_segment_sum_eager_fallback(data: Annotated[Any, TV_UnsortedSegmentSum_T], segment_ids: Annotated[Any, TV_UnsortedSegmentSum_Tindices], num_segments: Annotated[Any, TV_UnsortedSegmentSum_Tnumsegments], name, ctx) -> Annotated[Any, TV_UnsortedSegmentSum_T]: + _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], ctx, [_dtypes.int16, _dtypes.int32, _dtypes.int64, ]) + _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _inputs_flat = [data, segment_ids, num_segments] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments", + _attr_Tnumsegments) + _result = _execute.execute(b"UnsortedSegmentSum", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnsortedSegmentSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Xdivy_T = TypeVar("TV_Xdivy_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def xdivy(x: Annotated[Any, TV_Xdivy_T], y: Annotated[Any, TV_Xdivy_T], name=None) -> Annotated[Any, TV_Xdivy_T]: + r"""Returns 0 if x == 0, and x / y otherwise, elementwise. + + Args: + x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Xdivy", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return xdivy_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
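The segment-reduction docstrings above all note that negative entries in `segment_ids` are dropped; a short sketch of that behaviour with the public `tf.math.unsorted_segment_*` endpoints (illustrative values):

```python
import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
ids = tf.constant([0, -1, 1, 1])   # the entry with segment id -1 is ignored
print(tf.math.unsorted_segment_sum(data, ids, num_segments=2).numpy())  # [1. 7.]
print(tf.math.unsorted_segment_max(data, ids, num_segments=2).numpy())  # [1. 4.]
```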
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Xdivy", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Xdivy", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Xdivy = tf_export("raw_ops.Xdivy")(_ops.to_raw_op(xdivy)) + + +def xdivy_eager_fallback(x: Annotated[Any, TV_Xdivy_T], y: Annotated[Any, TV_Xdivy_T], name, ctx) -> Annotated[Any, TV_Xdivy_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Xdivy", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Xdivy", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Xlog1py_T = TypeVar("TV_Xlog1py_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +def xlog1py(x: Annotated[Any, TV_Xlog1py_T], y: Annotated[Any, TV_Xlog1py_T], name=None) -> Annotated[Any, TV_Xlog1py_T]: + r"""Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. + + Args: + x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Xlog1py", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return xlog1py_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
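`Xdivy` avoids the `0 / 0 = nan` case; a minimal sketch assuming the public `tf.math.xdivy` endpoint (defined elsewhere in `math_ops`) behaves as documented here:

```python
import tensorflow as tf

x = tf.constant([0.0, 2.0])
y = tf.constant([0.0, 4.0])
print(tf.math.xdivy(x, y).numpy())  # [0.  0.5] -- returns 0 where x == 0
print((x / y).numpy())              # [nan 0.5] -- plain division propagates nan
```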
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Xlog1py", x=x, y=y, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Xlog1py", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Xlog1py = tf_export("raw_ops.Xlog1py")(_ops.to_raw_op(xlog1py)) + + +def xlog1py_eager_fallback(x: Annotated[Any, TV_Xlog1py_T], y: Annotated[Any, TV_Xlog1py_T], name, ctx) -> Annotated[Any, TV_Xlog1py_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Xlog1py", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Xlog1py", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Xlogy_T = TypeVar("TV_Xlogy_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.xlogy') +def xlogy(x: Annotated[Any, TV_Xlogy_T], y: Annotated[Any, TV_Xlogy_T], name=None) -> Annotated[Any, TV_Xlogy_T]: + r"""Returns 0 if x == 0, and x * log(y) otherwise, elementwise. + + Args: + x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `complex64`, `complex128`. + y: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Xlogy", name, x, y) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xlogy( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + return xlogy_eager_fallback( + x, y, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xlogy, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xlogy( + (x, y, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
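`Xlog1py` applies the same safe-at-zero pattern to `x * log1p(y)`; a sketch assuming the public `tf.math.xlog1py` endpoint:

```python
import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
y = tf.constant([-1.0, 1.0, 3.0])
# x == 0 short-circuits to 0 even though log1p(-1) would be -inf.
print(tf.math.xlog1py(x, y).numpy())  # [0.  0.6931472  2.7725887]
```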
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Xlogy", x=x, y=y, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xlogy, (), dict(x=x, y=y, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Xlogy", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Xlogy = tf_export("raw_ops.Xlogy")(_ops.to_raw_op(xlogy)) +_dispatcher_for_xlogy = xlogy._tf_type_based_dispatcher.Dispatch + + +def xlogy_eager_fallback(x: Annotated[Any, TV_Xlogy_T], y: Annotated[Any, TV_Xlogy_T], name, ctx) -> Annotated[Any, TV_Xlogy_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (x, y) = _inputs_T + _inputs_flat = [x, y] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Xlogy", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Xlogy", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_Zeta_T = TypeVar("TV_Zeta_T", _atypes.Float32, _atypes.Float64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('math.zeta', v1=['math.zeta', 'zeta']) +@deprecated_endpoints('zeta') +def zeta(x: Annotated[Any, TV_Zeta_T], q: Annotated[Any, TV_Zeta_T], name=None) -> Annotated[Any, TV_Zeta_T]: + r"""Compute the Hurwitz zeta function \\(\zeta(x, q)\\). + + The Hurwitz zeta function is defined as: + + + \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) + + Args: + x: A `Tensor`. Must be one of the following types: `float32`, `float64`. + q: A `Tensor`. Must have the same type as `x`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `x`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Zeta", name, x, q) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_zeta( + (x, q, name,), None) + if _result is not NotImplemented: + return _result + return zeta_eager_fallback( + x, q, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + zeta, (), dict(x=x, q=q, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_zeta( + (x, q, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
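For the `xlogy` wrapper above and the Hurwitz `zeta` op documented just before this point: `xlogy` short-circuits `x * log(y)` to 0 at `x == 0`, and `zeta` with `q = 1` reduces to the Riemann zeta function. A brief sketch using the public endpoints exported in this hunk:

```python
import tensorflow as tf
import numpy as np

print(tf.math.xlogy(0.0, 0.0).numpy())     # 0.0 rather than nan
print(tf.math.xlogy(2.0, 3.0).numpy())     # 2 * ln(3) ~= 2.1972246
print(tf.math.zeta(x=2.0, q=1.0).numpy())  # ~1.6449341
print(np.pi ** 2 / 6)                      # Riemann zeta(2) = pi^2 / 6, for comparison
```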
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Zeta", x=x, q=q, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + zeta, (), dict(x=x, q=q, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Zeta", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Zeta = tf_export("raw_ops.Zeta")(_ops.to_raw_op(zeta)) +_dispatcher_for_zeta = zeta._tf_type_based_dispatcher.Dispatch + + +def zeta_eager_fallback(x: Annotated[Any, TV_Zeta_T], q: Annotated[Any, TV_Zeta_T], name, ctx) -> Annotated[Any, TV_Zeta_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([x, q], ctx, [_dtypes.float32, _dtypes.float64, ]) + (x, q) = _inputs_T + _inputs_flat = [x, q] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"Zeta", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Zeta", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e7aa90eb3843e2a755557a77c2a1f93dca219d64 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_ragged_math_ops.py @@ -0,0 +1,120 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated +_RaggedRangeOutput = collections.namedtuple( + "RaggedRange", + ["rt_nested_splits", "rt_dense_values"]) + + +TV_RaggedRange_T = TypeVar("TV_RaggedRange_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) +TV_RaggedRange_Tsplits = TypeVar("TV_RaggedRange_Tsplits", _atypes.Int32, _atypes.Int64) + +def ragged_range(starts: Annotated[Any, TV_RaggedRange_T], limits: Annotated[Any, TV_RaggedRange_T], deltas: Annotated[Any, TV_RaggedRange_T], Tsplits:TV_RaggedRange_Tsplits=_dtypes.int64, name=None): + r"""Returns a `RaggedTensor` containing the specified sequences of numbers. + + + Returns a `RaggedTensor` `result` composed from `rt_dense_values` and + `rt_nested_splits`, such that + `result[i] = range(starts[i], limits[i], deltas[i])`. 
+ + ```python + (rt_nested_splits, rt_dense_values) = ragged_range( + starts=[2, 5, 8], limits=[3, 5, 12], deltas=1) + result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits) + print(result) + + ``` + + The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. + The vector inputs must all have the same size. Scalar inputs are broadcast + to match the size of the vector inputs. + + Args: + starts: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`. + The starts of each range. + limits: A `Tensor`. Must have the same type as `starts`. + The limits of each range. + deltas: A `Tensor`. Must have the same type as `starts`. + The deltas of each range. + Tsplits: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (rt_nested_splits, rt_dense_values). + + rt_nested_splits: A `Tensor` of type `Tsplits`. + rt_dense_values: A `Tensor`. Has the same type as `starts`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RaggedRange", name, starts, limits, deltas, "Tsplits", Tsplits) + _result = _RaggedRangeOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ragged_range_eager_fallback( + starts, limits, deltas, Tsplits=Tsplits, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RaggedRange", starts=starts, limits=limits, deltas=deltas, + Tsplits=Tsplits, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tsplits", + _op._get_attr_type("Tsplits")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RaggedRange", _inputs_flat, _attrs, _result) + _result = _RaggedRangeOutput._make(_result) + return _result + +RaggedRange = tf_export("raw_ops.RaggedRange")(_ops.to_raw_op(ragged_range)) + + +def ragged_range_eager_fallback(starts: Annotated[Any, TV_RaggedRange_T], limits: Annotated[Any, TV_RaggedRange_T], deltas: Annotated[Any, TV_RaggedRange_T], Tsplits: TV_RaggedRange_Tsplits, name, ctx): + if Tsplits is None: + Tsplits = _dtypes.int64 + Tsplits = _execute.make_type(Tsplits, "Tsplits") + _attr_T, _inputs_T = _execute.args_to_matching_eager([starts, limits, deltas], ctx, [_dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ], _dtypes.int32) + (starts, limits, deltas) = _inputs_T + _inputs_flat = [starts, limits, deltas] + _attrs = ("T", _attr_T, "Tsplits", Tsplits) + _result = _execute.execute(b"RaggedRange", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RaggedRange", _inputs_flat, _attrs, _result) + _result = _RaggedRangeOutput._make(_result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateless_random_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateless_random_ops.py new file mode 100644 index 
0000000000000000000000000000000000000000..3ecd7b200f8a68b67384f0cfd20b6ed7e3182ade --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateless_random_ops.py @@ -0,0 +1,810 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_StatelessMultinomial_T = TypeVar("TV_StatelessMultinomial_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_StatelessMultinomial_Tseed = TypeVar("TV_StatelessMultinomial_Tseed", _atypes.Int32, _atypes.Int64) +TV_StatelessMultinomial_output_dtype = TypeVar("TV_StatelessMultinomial_output_dtype", _atypes.Int32, _atypes.Int64) + +def stateless_multinomial(logits: Annotated[Any, TV_StatelessMultinomial_T], num_samples: Annotated[Any, _atypes.Int32], seed: Annotated[Any, TV_StatelessMultinomial_Tseed], output_dtype:TV_StatelessMultinomial_output_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_StatelessMultinomial_output_dtype]: + r"""Draws samples from a multinomial distribution. + + Args: + logits: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + represents the unnormalized log probabilities for all classes. + num_samples: A `Tensor` of type `int32`. + 0-D. Number of independent samples to draw for each row slice. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `output_dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessMultinomial", name, logits, num_samples, seed, + "output_dtype", output_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_multinomial_eager_fallback( + logits, num_samples, seed, output_dtype=output_dtype, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if output_dtype is None: + output_dtype = _dtypes.int64 + output_dtype = _execute.make_type(output_dtype, "output_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessMultinomial", logits=logits, num_samples=num_samples, + seed=seed, output_dtype=output_dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tseed", + _op._get_attr_type("Tseed"), "output_dtype", + _op._get_attr_type("output_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessMultinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessMultinomial = tf_export("raw_ops.StatelessMultinomial")(_ops.to_raw_op(stateless_multinomial)) + + +def stateless_multinomial_eager_fallback(logits: Annotated[Any, TV_StatelessMultinomial_T], num_samples: Annotated[Any, _atypes.Int32], seed: Annotated[Any, TV_StatelessMultinomial_Tseed], output_dtype: TV_StatelessMultinomial_output_dtype, name, ctx) -> Annotated[Any, TV_StatelessMultinomial_output_dtype]: + if output_dtype is None: + output_dtype = _dtypes.int64 + output_dtype = _execute.make_type(output_dtype, "output_dtype") + _attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + num_samples = _ops.convert_to_tensor(num_samples, _dtypes.int32) + _inputs_flat = [logits, num_samples, seed] + _attrs = ("T", _attr_T, "Tseed", _attr_Tseed, "output_dtype", output_dtype) + _result = _execute.execute(b"StatelessMultinomial", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessMultinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessParameterizedTruncatedNormal_S = TypeVar("TV_StatelessParameterizedTruncatedNormal_S", _atypes.Int32, _atypes.Int64) +TV_StatelessParameterizedTruncatedNormal_Tseed = TypeVar("TV_StatelessParameterizedTruncatedNormal_Tseed", _atypes.Int32, _atypes.Int64) +TV_StatelessParameterizedTruncatedNormal_dtype = TypeVar("TV_StatelessParameterizedTruncatedNormal_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half) + +def stateless_parameterized_truncated_normal(shape: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_S], seed: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_Tseed], means: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], stddevs: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], minvals: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], maxvals: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], name=None) -> Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype]: + r"""TODO: add doc. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + means: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. + The mean parameter of each batch. + stddevs: A `Tensor`. Must have the same type as `means`. + The standard deviation parameter of each batch. Must be greater than 0. + minvals: A `Tensor`. 
Must have the same type as `means`. + The minimum cutoff. May be -infinity. + maxvals: A `Tensor`. Must have the same type as `means`. + The maximum cutoff. May be +infinity, and must be more than the minval + for each batch. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `means`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessParameterizedTruncatedNormal", name, shape, seed, + means, stddevs, minvals, maxvals) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_parameterized_truncated_normal_eager_fallback( + shape, seed, means, stddevs, minvals, maxvals, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessParameterizedTruncatedNormal", shape=shape, seed=seed, + means=means, stddevs=stddevs, + minvals=minvals, + maxvals=maxvals, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("S", _op._get_attr_type("S"), "Tseed", + _op._get_attr_type("Tseed"), "dtype", + _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessParameterizedTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessParameterizedTruncatedNormal = tf_export("raw_ops.StatelessParameterizedTruncatedNormal")(_ops.to_raw_op(stateless_parameterized_truncated_normal)) + + +def stateless_parameterized_truncated_normal_eager_fallback(shape: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_S], seed: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_Tseed], means: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], stddevs: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], minvals: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], maxvals: Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype], name, ctx) -> Annotated[Any, TV_StatelessParameterizedTruncatedNormal_dtype]: + _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([means, stddevs, minvals, maxvals], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) + (means, stddevs, minvals, maxvals) = _inputs_dtype + _inputs_flat = [shape, seed, means, stddevs, minvals, maxvals] + _attrs = ("S", _attr_S, "Tseed", _attr_Tseed, "dtype", _attr_dtype) + _result = _execute.execute(b"StatelessParameterizedTruncatedNormal", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessParameterizedTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomBinomial_S = TypeVar("TV_StatelessRandomBinomial_S", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomBinomial_Tseed = TypeVar("TV_StatelessRandomBinomial_Tseed", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomBinomial_T = TypeVar("TV_StatelessRandomBinomial_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) 
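The wrappers above are normally reached through `tf.raw_ops` or the public `tf.random.stateless_*` helpers rather than imported directly; a minimal standalone sketch (illustrative only, not part of the vendored module):

import tensorflow as tf

# A stateless op is a pure function of its inputs and the shape-[2] seed, so
# rerunning the same call reproduces the same values.
seed = [7, 17]
logits = tf.math.log([[0.25, 0.75]])

# tf.raw_ops names correspond one-to-one to the ops exported above.
draws = tf.raw_ops.StatelessMultinomial(
    logits=logits, num_samples=5, seed=seed, output_dtype=tf.int64)

# The higher-level stateless API covers the same ground (assumed here to route
# to the same kernel).
draws_again = tf.random.stateless_categorical(logits, 5, seed)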
+TV_StatelessRandomBinomial_dtype = TypeVar("TV_StatelessRandomBinomial_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def stateless_random_binomial(shape: Annotated[Any, TV_StatelessRandomBinomial_S], seed: Annotated[Any, TV_StatelessRandomBinomial_Tseed], counts: Annotated[Any, TV_StatelessRandomBinomial_T], probs: Annotated[Any, TV_StatelessRandomBinomial_T], dtype:TV_StatelessRandomBinomial_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_StatelessRandomBinomial_dtype]: + r"""Outputs deterministic pseudorandom random numbers from a binomial distribution. + + Outputs random values from a binomial distribution. + + The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + counts: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`. + The counts of the binomial distribution. Must be broadcastable with `probs`, + and broadcastable with the rightmost dimensions of `shape`. + probs: A `Tensor`. Must have the same type as `counts`. + The probability of success for the binomial distribution. Must be broadcastable + with `counts` and broadcastable with the rightmost dimensions of `shape`. + dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomBinomial", name, shape, seed, counts, probs, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_binomial_eager_fallback( + shape, seed, counts, probs, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomBinomial", shape=shape, seed=seed, counts=counts, + probs=probs, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("S", _op._get_attr_type("S"), "Tseed", + _op._get_attr_type("Tseed"), "T", _op._get_attr_type("T"), + "dtype", _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomBinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomBinomial = tf_export("raw_ops.StatelessRandomBinomial")(_ops.to_raw_op(stateless_random_binomial)) + + +def stateless_random_binomial_eager_fallback(shape: Annotated[Any, TV_StatelessRandomBinomial_S], seed: Annotated[Any, TV_StatelessRandomBinomial_Tseed], counts: Annotated[Any, TV_StatelessRandomBinomial_T], probs: Annotated[Any, TV_StatelessRandomBinomial_T], dtype: TV_StatelessRandomBinomial_dtype, name, ctx) -> Annotated[Any, TV_StatelessRandomBinomial_dtype]: + if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _attr_T, _inputs_T = _execute.args_to_matching_eager([counts, probs], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ], _dtypes.float64) + (counts, probs) = _inputs_T + _inputs_flat = [shape, seed, counts, probs] + _attrs = ("S", _attr_S, "Tseed", _attr_Tseed, "T", _attr_T, "dtype", dtype) + _result = _execute.execute(b"StatelessRandomBinomial", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomBinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomGammaV2_dtype = TypeVar("TV_StatelessRandomGammaV2_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_StatelessRandomGammaV2_T = TypeVar("TV_StatelessRandomGammaV2_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomGammaV2_Tseed = TypeVar("TV_StatelessRandomGammaV2_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_random_gamma_v2(shape: Annotated[Any, TV_StatelessRandomGammaV2_T], seed: Annotated[Any, TV_StatelessRandomGammaV2_Tseed], alpha: Annotated[Any, TV_StatelessRandomGammaV2_dtype], name=None) -> Annotated[Any, TV_StatelessRandomGammaV2_dtype]: + r"""Outputs deterministic pseudorandom random numbers from a gamma distribution. + + Outputs random values from a gamma distribution. + + The outputs are a deterministic function of `shape`, `seed`, and `alpha`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + alpha: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. + The concentration of the gamma distribution. Shape must match the rightmost + dimensions of `shape`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `alpha`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomGammaV2", name, shape, seed, alpha) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_gamma_v2_eager_fallback( + shape, seed, alpha, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomGammaV2", shape=shape, seed=seed, alpha=alpha, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomGammaV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomGammaV2 = tf_export("raw_ops.StatelessRandomGammaV2")(_ops.to_raw_op(stateless_random_gamma_v2)) + + +def stateless_random_gamma_v2_eager_fallback(shape: Annotated[Any, TV_StatelessRandomGammaV2_T], seed: Annotated[Any, TV_StatelessRandomGammaV2_Tseed], alpha: Annotated[Any, TV_StatelessRandomGammaV2_dtype], name, ctx) -> Annotated[Any, TV_StatelessRandomGammaV2_dtype]: + _attr_dtype, (alpha,) = _execute.args_to_matching_eager([alpha], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed, alpha] + _attrs = ("dtype", _attr_dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessRandomGammaV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomGammaV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomNormal_dtype = TypeVar("TV_StatelessRandomNormal_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_StatelessRandomNormal_T = TypeVar("TV_StatelessRandomNormal_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomNormal_Tseed = TypeVar("TV_StatelessRandomNormal_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_random_normal(shape: Annotated[Any, TV_StatelessRandomNormal_T], seed: Annotated[Any, TV_StatelessRandomNormal_Tseed], dtype:TV_StatelessRandomNormal_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatelessRandomNormal_dtype]: + r"""Outputs deterministic pseudorandom values from a normal distribution. + + The generated values will have mean 0 and standard deviation 1. + + The outputs are a deterministic function of `shape` and `seed`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomNormal", name, shape, seed, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_normal_eager_fallback( + shape, seed, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomNormal", shape=shape, seed=seed, dtype=dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomNormal = tf_export("raw_ops.StatelessRandomNormal")(_ops.to_raw_op(stateless_random_normal)) + + +def stateless_random_normal_eager_fallback(shape: Annotated[Any, TV_StatelessRandomNormal_T], seed: Annotated[Any, TV_StatelessRandomNormal_Tseed], dtype: TV_StatelessRandomNormal_dtype, name, ctx) -> Annotated[Any, TV_StatelessRandomNormal_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed] + _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessRandomNormal", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomPoisson_Rtype = TypeVar("TV_StatelessRandomPoisson_Rtype", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) +TV_StatelessRandomPoisson_dtype = TypeVar("TV_StatelessRandomPoisson_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) +TV_StatelessRandomPoisson_T = TypeVar("TV_StatelessRandomPoisson_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomPoisson_Tseed = TypeVar("TV_StatelessRandomPoisson_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_random_poisson(shape: Annotated[Any, TV_StatelessRandomPoisson_T], seed: Annotated[Any, TV_StatelessRandomPoisson_Tseed], lam: Annotated[Any, TV_StatelessRandomPoisson_Rtype], dtype: TV_StatelessRandomPoisson_dtype, name=None) -> Annotated[Any, TV_StatelessRandomPoisson_dtype]: + r"""Outputs deterministic pseudorandom random numbers from a Poisson distribution. + + Outputs random values from a Poisson distribution. + + The outputs are a deterministic function of `shape`, `seed`, and `lam`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + lam: A `Tensor`. 
Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`. + The rate of the Poisson distribution. Shape must match the rightmost dimensions + of `shape`. + dtype: A `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomPoisson", name, shape, seed, lam, "dtype", + dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_poisson_eager_fallback( + shape, seed, lam, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomPoisson", shape=shape, seed=seed, lam=lam, + dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Rtype", _op._get_attr_type("Rtype"), "dtype", + _op._get_attr_type("dtype"), "T", _op._get_attr_type("T"), + "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomPoisson", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomPoisson = tf_export("raw_ops.StatelessRandomPoisson")(_ops.to_raw_op(stateless_random_poisson)) + + +def stateless_random_poisson_eager_fallback(shape: Annotated[Any, TV_StatelessRandomPoisson_T], seed: Annotated[Any, TV_StatelessRandomPoisson_Tseed], lam: Annotated[Any, TV_StatelessRandomPoisson_Rtype], dtype: TV_StatelessRandomPoisson_dtype, name, ctx) -> Annotated[Any, TV_StatelessRandomPoisson_dtype]: + dtype = _execute.make_type(dtype, "dtype") + _attr_Rtype, (lam,) = _execute.args_to_matching_eager([lam], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed, lam] + _attrs = ("Rtype", _attr_Rtype, "dtype", dtype, "T", _attr_T, "Tseed", + _attr_Tseed) + _result = _execute.execute(b"StatelessRandomPoisson", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomPoisson", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomUniform_dtype = TypeVar("TV_StatelessRandomUniform_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_StatelessRandomUniform_T = TypeVar("TV_StatelessRandomUniform_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomUniform_Tseed = TypeVar("TV_StatelessRandomUniform_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_random_uniform(shape: Annotated[Any, TV_StatelessRandomUniform_T], seed: Annotated[Any, TV_StatelessRandomUniform_Tseed], dtype:TV_StatelessRandomUniform_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatelessRandomUniform_dtype]: + r"""Outputs deterministic pseudorandom random values from a uniform distribution. 
+ + The generated values follow a uniform distribution in the range `[0, 1)`. The + lower bound 0 is included in the range, while the upper bound 1 is excluded. + + The outputs are a deterministic function of `shape` and `seed`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomUniform", name, shape, seed, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_uniform_eager_fallback( + shape, seed, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomUniform", shape=shape, seed=seed, dtype=dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomUniform = tf_export("raw_ops.StatelessRandomUniform")(_ops.to_raw_op(stateless_random_uniform)) + + +def stateless_random_uniform_eager_fallback(shape: Annotated[Any, TV_StatelessRandomUniform_T], seed: Annotated[Any, TV_StatelessRandomUniform_Tseed], dtype: TV_StatelessRandomUniform_dtype, name, ctx) -> Annotated[Any, TV_StatelessRandomUniform_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed] + _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessRandomUniform", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomUniformFullInt_dtype = TypeVar("TV_StatelessRandomUniformFullInt_dtype", _atypes.Int32, _atypes.Int64, _atypes.UInt32, _atypes.UInt64) +TV_StatelessRandomUniformFullInt_T = TypeVar("TV_StatelessRandomUniformFullInt_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomUniformFullInt_Tseed = TypeVar("TV_StatelessRandomUniformFullInt_Tseed", _atypes.Int32, _atypes.Int64, _atypes.UInt32, _atypes.UInt64) + +def stateless_random_uniform_full_int(shape: Annotated[Any, TV_StatelessRandomUniformFullInt_T], seed: Annotated[Any, TV_StatelessRandomUniformFullInt_Tseed], dtype:TV_StatelessRandomUniformFullInt_dtype=_dtypes.uint64, 
name=None) -> Annotated[Any, TV_StatelessRandomUniformFullInt_dtype]: + r"""Outputs deterministic pseudorandom random integers from a uniform distribution. + + The generated values are uniform integers covering the whole range of `dtype`. + + The outputs are a deterministic function of `shape` and `seed`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`, `uint32`, `uint64`. + 2 seeds (shape [2]). + dtype: An optional `tf.DType` from: `tf.int32, tf.int64, tf.uint32, tf.uint64`. Defaults to `tf.uint64`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomUniformFullInt", name, shape, seed, "dtype", + dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_uniform_full_int_eager_fallback( + shape, seed, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.uint64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomUniformFullInt", shape=shape, seed=seed, dtype=dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomUniformFullInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomUniformFullInt = tf_export("raw_ops.StatelessRandomUniformFullInt")(_ops.to_raw_op(stateless_random_uniform_full_int)) + + +def stateless_random_uniform_full_int_eager_fallback(shape: Annotated[Any, TV_StatelessRandomUniformFullInt_T], seed: Annotated[Any, TV_StatelessRandomUniformFullInt_Tseed], dtype: TV_StatelessRandomUniformFullInt_dtype, name, ctx) -> Annotated[Any, TV_StatelessRandomUniformFullInt_dtype]: + if dtype is None: + dtype = _dtypes.uint64 + dtype = _execute.make_type(dtype, "dtype") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, _dtypes.uint32, _dtypes.uint64, ], _dtypes.int64) + _inputs_flat = [shape, seed] + _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessRandomUniformFullInt", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomUniformFullInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessRandomUniformInt_dtype = TypeVar("TV_StatelessRandomUniformInt_dtype", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomUniformInt_T = TypeVar("TV_StatelessRandomUniformInt_T", _atypes.Int32, _atypes.Int64) +TV_StatelessRandomUniformInt_Tseed = TypeVar("TV_StatelessRandomUniformInt_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_random_uniform_int(shape: Annotated[Any, 
TV_StatelessRandomUniformInt_T], seed: Annotated[Any, TV_StatelessRandomUniformInt_Tseed], minval: Annotated[Any, TV_StatelessRandomUniformInt_dtype], maxval: Annotated[Any, TV_StatelessRandomUniformInt_dtype], name=None) -> Annotated[Any, TV_StatelessRandomUniformInt_dtype]: + r"""Outputs deterministic pseudorandom random integers from a uniform distribution. + + The generated values follow a uniform distribution in the range `[minval, maxval)`. + + The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + minval: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Minimum value (inclusive, scalar). + maxval: A `Tensor`. Must have the same type as `minval`. + Maximum value (exclusive, scalar). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `minval`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessRandomUniformInt", name, shape, seed, minval, maxval) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_random_uniform_int_eager_fallback( + shape, seed, minval, maxval, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessRandomUniformInt", shape=shape, seed=seed, minval=minval, + maxval=maxval, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessRandomUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessRandomUniformInt = tf_export("raw_ops.StatelessRandomUniformInt")(_ops.to_raw_op(stateless_random_uniform_int)) + + +def stateless_random_uniform_int_eager_fallback(shape: Annotated[Any, TV_StatelessRandomUniformInt_T], seed: Annotated[Any, TV_StatelessRandomUniformInt_Tseed], minval: Annotated[Any, TV_StatelessRandomUniformInt_dtype], maxval: Annotated[Any, TV_StatelessRandomUniformInt_dtype], name, ctx) -> Annotated[Any, TV_StatelessRandomUniformInt_dtype]: + _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([minval, maxval], ctx, [_dtypes.int32, _dtypes.int64, ]) + (minval, maxval) = _inputs_dtype + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed, minval, maxval] + _attrs = ("dtype", _attr_dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessRandomUniformInt", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessRandomUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatelessTruncatedNormal_dtype = TypeVar("TV_StatelessTruncatedNormal_dtype", _atypes.BFloat16, _atypes.Float32, 
_atypes.Float64, _atypes.Half) +TV_StatelessTruncatedNormal_T = TypeVar("TV_StatelessTruncatedNormal_T", _atypes.Int32, _atypes.Int64) +TV_StatelessTruncatedNormal_Tseed = TypeVar("TV_StatelessTruncatedNormal_Tseed", _atypes.Int32, _atypes.Int64) + +def stateless_truncated_normal(shape: Annotated[Any, TV_StatelessTruncatedNormal_T], seed: Annotated[Any, TV_StatelessTruncatedNormal_Tseed], dtype:TV_StatelessTruncatedNormal_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatelessTruncatedNormal_dtype]: + r"""Outputs deterministic pseudorandom values from a truncated normal distribution. + + The generated values follow a normal distribution with mean 0 and standard + deviation 1, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + The outputs are a deterministic function of `shape` and `seed`. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + seed: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 2 seeds (shape [2]). + dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatelessTruncatedNormal", name, shape, seed, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateless_truncated_normal_eager_fallback( + shape, seed, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatelessTruncatedNormal", shape=shape, seed=seed, dtype=dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "T", + _op._get_attr_type("T"), "Tseed", _op._get_attr_type("Tseed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatelessTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatelessTruncatedNormal = tf_export("raw_ops.StatelessTruncatedNormal")(_ops.to_raw_op(stateless_truncated_normal)) + + +def stateless_truncated_normal_eager_fallback(shape: Annotated[Any, TV_StatelessTruncatedNormal_T], seed: Annotated[Any, TV_StatelessTruncatedNormal_Tseed], dtype: TV_StatelessTruncatedNormal_dtype, name, ctx) -> Annotated[Any, TV_StatelessTruncatedNormal_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) + _inputs_flat = [shape, seed] + _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed) + _result = _execute.execute(b"StatelessTruncatedNormal", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatelessTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_partition_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_partition_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9b61abfa8797e6007c5826bf8292a3800543a8c2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_partition_ops.py @@ -0,0 +1,351 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_TPUPartitionedInput_T = TypeVar("TV_TPUPartitionedInput_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tpu_partitioned_input(inputs: Annotated[List[Any], TV_TPUPartitionedInput_T], partition_dim:int=0, name=None) -> Annotated[Any, TV_TPUPartitionedInput_T]: + r"""An op that groups a list of partitioned inputs together. This op + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type. + A list of partitioned inputs which must have the same shape. + partition_dim: An optional `int`. Defaults to `0`. + An integer describles which dimension is partitioned. -1 means + those inputs are replicated. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUPartitionedInput", name, inputs, "partition_dim", + partition_dim) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_partitioned_input_eager_fallback( + inputs, partition_dim=partition_dim, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_partitioned_input' Op, not %r." 
% inputs) + _attr_N = len(inputs) + if partition_dim is None: + partition_dim = 0 + partition_dim = _execute.make_int(partition_dim, "partition_dim") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUPartitionedInput", inputs=inputs, partition_dim=partition_dim, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), + "partition_dim", _op._get_attr_int("partition_dim")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUPartitionedInput", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TPUPartitionedInput = tf_export("raw_ops.TPUPartitionedInput")(_ops.to_raw_op(tpu_partitioned_input)) + + +def tpu_partitioned_input_eager_fallback(inputs: Annotated[List[Any], TV_TPUPartitionedInput_T], partition_dim: int, name, ctx) -> Annotated[Any, TV_TPUPartitionedInput_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_partitioned_input' Op, not %r." % inputs) + _attr_N = len(inputs) + if partition_dim is None: + partition_dim = 0 + partition_dim = _execute.make_int(partition_dim, "partition_dim") + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "T", _attr_T, "partition_dim", partition_dim) + _result = _execute.execute(b"TPUPartitionedInput", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUPartitionedInput", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TPUPartitionedInputV2_T = TypeVar("TV_TPUPartitionedInputV2_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tpu_partitioned_input_v2(inputs: Annotated[List[Any], TV_TPUPartitionedInputV2_T], partition_dims, is_packed:bool=False, name=None) -> Annotated[Any, TV_TPUPartitionedInputV2_T]: + r"""An op that groups a list of partitioned inputs together. Supports ND sharding. + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type. + A list of partitioned inputs which must have the same shape. + partition_dims: A list of `ints`. + A list of integers describing how each dimension is partitioned. Emptiness + indicates the inputs are replicated. + is_packed: An optional `bool`. Defaults to `False`. + Indicates whether the input is a packed resource. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `inputs`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUPartitionedInputV2", name, inputs, "partition_dims", + partition_dims, "is_packed", is_packed) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_partitioned_input_v2_eager_fallback( + inputs, partition_dims=partition_dims, is_packed=is_packed, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_partitioned_input_v2' Op, not %r." % inputs) + _attr_N = len(inputs) + if not isinstance(partition_dims, (list, tuple)): + raise TypeError( + "Expected list for 'partition_dims' argument to " + "'tpu_partitioned_input_v2' Op, not %r." % partition_dims) + partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims] + if is_packed is None: + is_packed = False + is_packed = _execute.make_bool(is_packed, "is_packed") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUPartitionedInputV2", inputs=inputs, partition_dims=partition_dims, + is_packed=is_packed, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), + "partition_dims", _op.get_attr("partition_dims"), "is_packed", + _op._get_attr_bool("is_packed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUPartitionedInputV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TPUPartitionedInputV2 = tf_export("raw_ops.TPUPartitionedInputV2")(_ops.to_raw_op(tpu_partitioned_input_v2)) + + +def tpu_partitioned_input_v2_eager_fallback(inputs: Annotated[List[Any], TV_TPUPartitionedInputV2_T], partition_dims, is_packed: bool, name, ctx) -> Annotated[Any, TV_TPUPartitionedInputV2_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_partitioned_input_v2' Op, not %r." % inputs) + _attr_N = len(inputs) + if not isinstance(partition_dims, (list, tuple)): + raise TypeError( + "Expected list for 'partition_dims' argument to " + "'tpu_partitioned_input_v2' Op, not %r." 
% partition_dims) + partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims] + if is_packed is None: + is_packed = False + is_packed = _execute.make_bool(is_packed, "is_packed") + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "T", _attr_T, "partition_dims", partition_dims, + "is_packed", is_packed) + _result = _execute.execute(b"TPUPartitionedInputV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUPartitionedInputV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TPUPartitionedOutput_T = TypeVar("TV_TPUPartitionedOutput_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tpu_partitioned_output(inputs: Annotated[Any, TV_TPUPartitionedOutput_T], num_splits: int, partition_dim:int=0, name=None): + r"""An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned + + outputs outside the XLA computation. + + Args: + inputs: A `Tensor`. + A tensor which represents the full shape of partitioned tensors. + num_splits: An `int` that is `>= 1`. + partition_dim: An optional `int`. Defaults to `0`. + An integer describles which dimension is partitioned. + name: A name for the operation (optional). + + Returns: + A list of `num_splits` `Tensor` objects with the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUPartitionedOutput", name, inputs, "num_splits", num_splits, + "partition_dim", partition_dim) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_partitioned_output_eager_fallback( + inputs, num_splits=num_splits, partition_dim=partition_dim, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
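  # Note: unlike the single-output ops earlier in this diff, TPUPartitionedOutput
  # produces `num_splits` tensors; the eager fallback passes num_splits as the
  # output count to _execute.execute, and both paths return the full _result
  # list without unpacking a single element.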
+ num_splits = _execute.make_int(num_splits, "num_splits") + if partition_dim is None: + partition_dim = 0 + partition_dim = _execute.make_int(partition_dim, "partition_dim") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUPartitionedOutput", inputs=inputs, num_splits=num_splits, + partition_dim=partition_dim, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "num_splits", + _op._get_attr_int("num_splits"), "partition_dim", + _op._get_attr_int("partition_dim")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUPartitionedOutput", _inputs_flat, _attrs, _result) + return _result + +TPUPartitionedOutput = tf_export("raw_ops.TPUPartitionedOutput")(_ops.to_raw_op(tpu_partitioned_output)) + + +def tpu_partitioned_output_eager_fallback(inputs: Annotated[Any, TV_TPUPartitionedOutput_T], num_splits: int, partition_dim: int, name, ctx): + num_splits = _execute.make_int(num_splits, "num_splits") + if partition_dim is None: + partition_dim = 0 + partition_dim = _execute.make_int(partition_dim, "partition_dim") + _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, []) + _inputs_flat = [inputs] + _attrs = ("T", _attr_T, "num_splits", num_splits, "partition_dim", + partition_dim) + _result = _execute.execute(b"TPUPartitionedOutput", num_splits, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUPartitionedOutput", _inputs_flat, _attrs, _result) + return _result + + +TV_TPUPartitionedOutputV2_T = TypeVar("TV_TPUPartitionedOutputV2_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tpu_partitioned_output_v2(inputs: Annotated[Any, TV_TPUPartitionedOutputV2_T], num_splits: int, partition_dims, name=None): + r"""An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned + + outputs outside the XLA computation. Supports ND sharding. + + Args: + inputs: A `Tensor`. + A tensor which represents the full shape of partitioned tensors. + num_splits: An `int` that is `>= 1`. + partition_dims: A list of `ints`. + A list of integers describing how each dimension is partitioned. Emptiness + indicates the inputs are replicated. + name: A name for the operation (optional). + + Returns: + A list of `num_splits` `Tensor` objects with the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUPartitionedOutputV2", name, inputs, "num_splits", + num_splits, "partition_dims", partition_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_partitioned_output_v2_eager_fallback( + inputs, num_splits=num_splits, partition_dims=partition_dims, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_splits = _execute.make_int(num_splits, "num_splits") + if not isinstance(partition_dims, (list, tuple)): + raise TypeError( + "Expected list for 'partition_dims' argument to " + "'tpu_partitioned_output_v2' Op, not %r." % partition_dims) + partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUPartitionedOutputV2", inputs=inputs, num_splits=num_splits, + partition_dims=partition_dims, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "num_splits", + _op._get_attr_int("num_splits"), "partition_dims", + _op.get_attr("partition_dims")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUPartitionedOutputV2", _inputs_flat, _attrs, _result) + return _result + +TPUPartitionedOutputV2 = tf_export("raw_ops.TPUPartitionedOutputV2")(_ops.to_raw_op(tpu_partitioned_output_v2)) + + +def tpu_partitioned_output_v2_eager_fallback(inputs: Annotated[Any, TV_TPUPartitionedOutputV2_T], num_splits: int, partition_dims, name, ctx): + num_splits = _execute.make_int(num_splits, "num_splits") + if not isinstance(partition_dims, (list, tuple)): + raise TypeError( + "Expected list for 'partition_dims' argument to " + "'tpu_partitioned_output_v2' Op, not %r." % partition_dims) + partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims] + _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, []) + _inputs_flat = [inputs] + _attrs = ("T", _attr_T, "num_splits", num_splits, "partition_dims", + partition_dims) + _result = _execute.execute(b"TPUPartitionedOutputV2", num_splits, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUPartitionedOutputV2", _inputs_flat, _attrs, _result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/init_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/init_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..35ce2be00ba2931d5bda5483165a5626f41d9075 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/init_ops.py @@ -0,0 +1,1833 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operations often used for initializing tensors. + +All variable initializers returned by functions in this file should have the +following signature: + +def _initializer(shape, dtype=dtypes.float32, partition_info=None): + Args: + shape: List of `int` representing the shape of the output `Tensor`. Some + initializers may also be able to accept a `Tensor`. + dtype: (Optional) Type of the output `Tensor`. + partition_info: (Optional) variable_scope._PartitionInfo object holding + additional information about how the variable is partitioned. 
May be + `None` if the variable is not partitioned. + + Returns: + A `Tensor` of type `dtype` and `shape`. +""" +import math + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import gen_linalg_ops +from tensorflow.python.ops import linalg_ops_impl +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.util import deprecation +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.deprecation import deprecated_arg_values +from tensorflow.python.util.deprecation import deprecated_args +from tensorflow.python.util.tf_export import tf_export + + +class Initializer: + """Initializer base class: all initializers inherit from this class.""" + + def __call__(self, shape, dtype=None, partition_info=None): + """Returns a tensor object initialized as specified by the initializer. + + Args: + shape: Shape of the tensor. + dtype: Optional dtype of the tensor. If not provided use the initializer + dtype. + partition_info: Optional information about the possible partitioning of a + tensor. + """ + raise NotImplementedError + + def get_config(self): + """Returns the configuration of the initializer as a JSON-serializable dict. + + Returns: + A JSON-serializable Python dict. + """ + return {} + + @classmethod + def from_config(cls, config): + """Instantiates an initializer from a configuration dictionary. + + Example: + + ```python + initializer = RandomUniform(-1, 1) + config = initializer.get_config() + initializer = RandomUniform.from_config(config) + ``` + + Args: + config: A Python dictionary. It will typically be the output of + `get_config`. + + Returns: + An Initializer instance. + """ + return cls(**config) + + +@tf_export(v1=["initializers.zeros", "zeros_initializer"]) +@deprecation.deprecated_endpoints("initializers.zeros") +class Zeros(Initializer): + """Initializer that generates tensors initialized to 0. + + @compatibility(TF2) + `tf.compat.v1.zeros_initializer` is compatible with eager execution + and `tf.function`. + + To migrate to TF2, please use `tf.zeros_initializer` instead. The `dtype` + argument in `tf.compat.v1.zeros_initializer.__init__()` does not exist in + `tf.zeros_initializer.__init__()`. However, you can specify the `dtype` in + `__call__()` in both cases. 
+ + #### Structural Mapping to TF2 + + Before: + + ```python + initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32) + variable = tf.Variable(initializer(shape=[3, 3])) + ``` + + After: + + ```python + initializer = tf.zeros_initializer() + variable = tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :------------------- | :--------------- | :------------------------- | + | `dtype` | `dtype` | In `__call__()` method | + | `partition_info` | - | (`__call__` arg in TF1) Not supported | + + + #### Before & After Usage Example + + Before: + + >>> initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32) + >>> tf.Variable(initializer(shape=[3])).numpy() + array([0., 0., 0.], dtype=float32) + >>> tf.Variable(initializer(shape=[3, 3])).numpy() + array([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> initializer = tf.compat.v1.zeros_initializer() + >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() + array([0., 0., 0.], dtype=float32) + >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() + array([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=float32) + + After: + + >>> initializer = tf.zeros_initializer() + >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() + array([0., 0., 0.], dtype=float32) + >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() + array([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=float32) + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, dtype=dtypes.float32): + self.dtype = dtypes.as_dtype(dtype) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + return array_ops.zeros(shape, dtype) + + def get_config(self): + return {"dtype": self.dtype.name} + + +@tf_export(v1=["initializers.ones", "ones_initializer"]) +@deprecation.deprecated_endpoints("initializers.ones", "ones_initializer") +class Ones(Initializer): + """Initializer that generates tensors initialized to 1. + + @compatibility(TF2) + This API is compatible with TF2 behavior and `tf.function`, and can be + migrated immediately with `tf.keras.initializers.ones`. + + Before: + >>> initializer = tf.compat.v1.keras.initializers.ones() + >>> initializer((1, 1)) + + + After: + >>> initializer = tf.keras.initializers.ones() + >>> initializer((1, 1)) + + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, dtype=dtypes.float32): + self.dtype = dtypes.as_dtype(dtype) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + return array_ops.ones(shape, dtype) + + def get_config(self): + return {"dtype": self.dtype.name} + + +@tf_export(v1=["initializers.constant", "constant_initializer"]) +@deprecation.deprecated_endpoints("constant_initializer") +class Constant(Initializer): + """Initializer that generates tensors with constant values. + + The resulting tensor is populated with values of type `dtype`, as + specified by arguments `value` following the desired `shape` of the + new tensor (see examples below). + + The argument `value` can be a constant value, or a list of values of type + `dtype`. 
If `value` is a list, then the length of the list must be less + than or equal to the number of elements implied by the desired shape of the + tensor. In the case where the total number of elements in `value` is less + than the number of elements required by the tensor shape, the last element + in `value` will be used to fill the remaining entries. If the total number of + elements in `value` is greater than the number of elements required by the + tensor shape, the initializer will raise a `ValueError`. + + Args: + value: A Python scalar, list or tuple of values, or a N-dimensional numpy + array. All elements of the initialized variable will be set to the + corresponding value in the `value` argument. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. + verify_shape: Boolean that enables verification of the shape of `value`. If + `True`, the initializer will throw an error if the shape of `value` is not + compatible with the shape of the initialized tensor. + + Raises: + TypeError: If the input `value` is not one of the expected types. + + Examples: + The following example can be rewritten using a numpy.ndarray instead + of the `value` list, even reshaped, as shown in the two commented lines + below the `value` list initialization. + + >>> value = [0, 1, 2, 3, 4, 5, 6, 7] + >>> init = tf.compat.v1.constant_initializer(value) + >>> # fitting shape + >>> with tf.compat.v1.Session(): + ... x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init) + ... x.initializer.run() + ... print(x.eval()) + [[0. 1. 2. 3.] + [4. 5. 6. 7.]] + >>> # Larger shape + >>> with tf.compat.v1.Session(): + ... y = tf.compat.v1.get_variable('y', shape=[3, 4], initializer=init) + ... y.initializer.run() + ... print(y.eval()) + [[0. 1. 2. 3.] + [4. 5. 6. 7.] + [7. 7. 7. 7.]] + >>> # Smaller shape + >>> with tf.compat.v1.Session(): + ... z = tf.compat.v1.get_variable('z', shape=[2, 3], initializer=init) + Traceback (most recent call last): + ... + ValueError: Too many elements provided. Needed at most 6, but received 8 + >>> # Shape verification + >>> init_verify = tf.compat.v1.constant_initializer(value, verify_shape=True) + >>> with tf.compat.v1.Session(): + ... u = tf.compat.v1.get_variable('u', shape=[3, 4], + ... initializer=init_verify) + Traceback (most recent call last): + ... + TypeError: Expected Tensor's shape: (3, 4), got (8,). + + @compatibility(TF2) + Although it is a legacy API endpoint, `tf.compat.v1.constant_initializer` + is compatible with eager execution and `tf.function`. + + To migrate to a non-legacy TF2 API, please use `tf.constant_initializer` + instead. The `dtype` + argument in `tf.compat.v1.constant_initializer.__init__()` does not exist in + `tf.constant_initializer.__init__()`. However, you can specify the `dtype` in + `__call__()` in both cases. + + In the `compat.v1` symbol, if `verify_shape` is set to `True`, an exception + is raised when initializing a variable with a different shape from + `value`. If set to `False`, `value` is reshaped to initialize the variable + if necessary. An exception would only be raised when the number of + elements are different. + + The `verify_shape` argument is not supported in TF2. Using + `tf.constant_initializer` is equivalent to setting `verify_shape` to `False`. 
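+
+  For reference, a short sketch of the fill rule described above (the values
+  and shape here are arbitrary examples):
+
+  ```python
+  init = tf.compat.v1.constant_initializer([1., 2.])
+  # Two values for six slots: the trailing entries are filled with the last
+  # element, giving [[1., 2., 2.], [2., 2., 2.]].
+  v = tf.Variable(init(shape=[2, 3], dtype=tf.float32))
+  ```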
+ + #### Structural Mapping to TF2 + + Before: + + ```python + value = [0, 1, 2, 3, 4, 5, 6, 7] + initializer = tf.compat.v1.constant_initializer( + value=value, + dtype=tf.float32, + verify_shape=False) + variable = tf.Variable(initializer(shape=[2, 4])) + ``` + + After: + + ```python + value = [0, 1, 2, 3, 4, 5, 6, 7] + initializer = tf.constant_initializer(value=value) + tf.Variable(initializer(shape=[2, 4], dtype=tf.float32)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :--------------- | :-------------------------- | + | `value` | `value` | In constructor | + | `dtype` | `dtype` | In `__call__()` method | + | `verify_shape` | Not Supported | Equivalent to set to `False`| + | `partition_info` | - | (`__call__` arg in TF1) Not supported | + + + #### Before & After Usage Example + + Before: + + >>> value = [1., 2., 3., 4.] + >>> initializer = tf.compat.v1.constant_initializer( + ... value=value, dtype=tf.float32, verify_shape=True) + >>> tf.Variable(initializer(shape=[2, 2])).numpy() + Traceback (most recent call last): + ... + TypeError: Expected Tensor's shape: (2, 2), got (4,). + >>> initializer = tf.compat.v1.constant_initializer( + ... value=value, dtype=tf.float32, verify_shape=False) + >>> tf.Variable(initializer(shape=[2, 2])).numpy() + array([[1., 2.], + [3., 4.]], dtype=float32) + + After: + + >>> value = [1., 2., 3., 4.] + >>> initializer = tf.constant_initializer(value=value) + >>> tf.Variable(initializer(shape=[2, 2], dtype=tf.float32)).numpy() + array([[1., 2.], + [3., 4.]], dtype=float32) + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + @deprecated_args(None, "Objects must now be the required shape or no shape " + "can be specified", "verify_shape") + def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False): + if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))): + raise TypeError( + f"Invalid type for initial value={value} of type: " + f"{type(value).__name__}. Expected Python scalar, list or tuple of " + "values, or numpy.ndarray.") + + self.value = value + self.dtype = dtypes.as_dtype(dtype) + self._verify_shape = verify_shape + + def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None): + if dtype is None: + dtype = self.dtype + if verify_shape is None: + verify_shape = self._verify_shape + return constant_op.constant_v1( + self.value, dtype=dtype, shape=shape, verify_shape=verify_shape) + + def get_config(self): + # We don't include `verify_shape` for compatibility with Keras. + # `verify_shape` should be passed as an argument to `__call__` rather + # than as a constructor argument: conceptually it isn't a property + # of the initializer. + return {"value": self.value, "dtype": self.dtype.name} + + +@tf_export(v1=["initializers.random_uniform", "random_uniform_initializer"]) +@deprecation.deprecated_endpoints("initializers.random_uniform") +class RandomUniform(Initializer): + """Initializer that generates tensors with a uniform distribution. + + Args: + minval: A python scalar or a scalar tensor. Lower bound of the range of + random values to generate. + maxval: A python scalar or a scalar tensor. Upper bound of the range of + random values to generate. Defaults to 1 for float types. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. 
+ dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. + + @compatibility(TF2) + Although it is a legacy compat.v1 API, this symbol is compatible with eager + execution and `tf.function`. + + To switch to TF2, switch to using either + `tf.initializers.RandomUniform` or `tf.keras.initializers.RandomUniform` + (neither from `compat.v1`) and + pass the dtype when calling the initializer. Keep in mind that + the default minval, maxval and the behavior of fixed seeds have changed. + + #### Structural Mapping to TF2 + + Before: + + ```python + initializer = tf.compat.v1.random_uniform_initializer( + minval=minval, + maxval=maxval, + seed=seed, + dtype=dtype) + + weight_one = tf.Variable(initializer(shape_one)) + weight_two = tf.Variable(initializer(shape_two)) + ``` + + After: + + ```python + initializer = tf.initializers.RandomUniform( + minval=minval, + maxval=maxval, + seed=seed) + + weight_one = tf.Variable(initializer(shape_one, dtype=dtype)) + weight_two = tf.Variable(initializer(shape_two, dtype=dtype)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :-------------- | :------------------------- | + | `minval` | `minval` | Default changes from 0 to -0.05 | + | `maxval` | `maxval` | Default changes from 1.0 to 0.05 | + | `seed` | `seed` | | + | `dtype` | `dtype` | The TF2 native api only takes it | + : : : as a `__call__` arg, not a constructor arg. : + | `partition_info` | - | (`__call__` arg in TF1) Not supported | + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, minval=.0, maxval=None, seed=None, dtype=dtypes.float32): + self.minval = minval + self.maxval = maxval + self.seed = seed + self.dtype = dtypes.as_dtype(dtype) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + return random_ops.random_uniform( + shape, self.minval, self.maxval, dtype, seed=self.seed) + + def get_config(self): + return { + "minval": self.minval, + "maxval": self.maxval, + "seed": self.seed, + "dtype": self.dtype.name + } + + +@tf_export(v1=["initializers.random_normal", "random_normal_initializer"]) +@deprecation.deprecated_endpoints("initializers.random_normal") +class RandomNormal(Initializer): + """Initializer that generates tensors with a normal distribution. + + Args: + mean: a python scalar or a scalar tensor. Mean of the random values to + generate. + stddev: a python scalar or a scalar tensor. Standard deviation of the random + values to generate. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + + @compatibility(TF2) + Although it is a legacy `compat.v1` API, this symbol is compatible with eager + execution and `tf.function`. + + To switch to TF2, switch to using either + `tf.initializers.RandomNormal` or `tf.keras.initializers.RandomNormal` + (neither from `compat.v1`) and + pass the dtype when calling the initializer. Keep in mind that + the default stddev and the behavior of fixed seeds have changed. 
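+
+  For reference, a typical pre-migration usage looks like the following (an
+  illustrative sketch; the `mean`, `stddev` and `seed` values are arbitrary):
+
+  ```python
+  initializer = tf.compat.v1.random_normal_initializer(
+      mean=0.0, stddev=0.02, seed=42)
+  # Draws float32 values from a normal distribution with the given mean and
+  # standard deviation.
+  w = tf.Variable(initializer(shape=[784, 256]))
+  ```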
+ + #### Structural Mapping to TF2 + + Before: + + ```python + initializer = tf.compat.v1.random_normal_initializer( + mean=mean, + stddev=stddev, + seed=seed, + dtype=dtype) + + weight_one = tf.Variable(initializer(shape_one)) + weight_two = tf.Variable(initializer(shape_two)) + ``` + + After: + + ```python + initializer = tf.initializers.RandomNormal( + mean=mean, + seed=seed, + stddev=stddev) + + weight_one = tf.Variable(initializer(shape_one, dtype=dtype)) + weight_two = tf.Variable(initializer(shape_two, dtype=dtype)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :----------------- | :-------------- | :------------------------- | + | `mean` | `mean` | No change to defaults | + | `stddev` | `stddev` | Default changes from 1.0 to 0.05 | + | `seed` | `seed` | | + | `dtype` | `dtype` | The TF2 native api only takes it as a | + : : : `__call__` arg, not a constructor arg. : + | `partition_info` | - | (`__call__` arg in TF1) Not supported. | + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32): + self.mean = mean + self.stddev = stddev + self.seed = seed + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + return random_ops.random_normal( + shape, self.mean, self.stddev, dtype, seed=self.seed) + + def get_config(self): + return { + "mean": self.mean, + "stddev": self.stddev, + "seed": self.seed, + "dtype": self.dtype.name + } + + +@tf_export(v1=["initializers.truncated_normal", "truncated_normal_initializer"]) +@deprecation.deprecated_endpoints("initializers.truncated_normal", + "truncated_normal_initializer") +class TruncatedNormal(Initializer): + """Initializer that generates a truncated normal distribution. + + These values are similar to values from a `random_normal_initializer` + except that values more than two standard deviations from the mean + are discarded and re-drawn. This is the recommended initializer for + neural network weights and filters. + + Args: + mean: a python scalar or a scalar tensor. Mean of the random values to + generate. + stddev: a python scalar or a scalar tensor. Standard deviation of the random + values to generate. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + + @compatibility(TF2) + Although it is a legacy `compat.v1` API, this symbol is compatible with eager + execution and `tf.function`. + + To switch to TF2, switch to using either + `tf.initializers.truncated_normal` or `tf.keras.initializers.TruncatedNormal` + (neither from `compat.v1`) and + pass the dtype when calling the initializer. Keep in mind that + the default stddev and the behavior of fixed seeds have changed. 
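+
+  For reference, a typical pre-migration usage looks like the following (an
+  illustrative sketch; the argument values are arbitrary):
+
+  ```python
+  initializer = tf.compat.v1.truncated_normal_initializer(
+      mean=0.0, stddev=0.1, seed=7)
+  # Values are drawn from N(0.0, 0.1); draws falling more than two standard
+  # deviations from the mean are discarded and re-drawn.
+  w = tf.Variable(initializer(shape=[3, 3, 64, 128]))
+  ```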
+ + #### Structural Mapping to TF2 + + Before: + + ```python + initializer = tf.compat.v1.truncated_normal_initializer( + mean=mean, + stddev=stddev, + seed=seed, + dtype=dtype) + + weight_one = tf.Variable(initializer(shape_one)) + weight_two = tf.Variable(initializer(shape_two)) + ``` + + After: + + ```python + initializer = tf.initializers.truncated_normal( + mean=mean, + seed=seed, + stddev=stddev) + + weight_one = tf.Variable(initializer(shape_one, dtype=dtype)) + weight_two = tf.Variable(initializer(shape_two, dtype=dtype)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :-------------- | :------------------------- | + | `mean` | `mean` | No change to defaults | + | `stddev` | `stddev` | Default changes from 1.0 to 0.05 | + | `seed` | `seed` | | + | `dtype` | `dtype` | The TF2 native api only takes it | + : : : as a `__call__` arg, not a constructor arg. : + | `partition_info` | - | (`__call__` arg in TF1) Not supported | + + @end_compatibility + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32): + self.mean = mean + self.stddev = stddev + self.seed = seed + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + return random_ops.truncated_normal( + shape, self.mean, self.stddev, dtype, seed=self.seed) + + def get_config(self): + return { + "mean": self.mean, + "stddev": self.stddev, + "seed": self.seed, + "dtype": self.dtype.name + } + + +@tf_export(v1=[ + "initializers.uniform_unit_scaling", "uniform_unit_scaling_initializer" +]) +@deprecation.deprecated_endpoints("uniform_unit_scaling_initializer", + "initializers.uniform_unit_scaling") +class UniformUnitScaling(Initializer): + """Initializer that generates tensors without scaling variance. + + When initializing a deep network, it is in principle advantageous to keep + the scale of the input variance constant, so it does not explode or diminish + by reaching the final layer. If the input is `x` and the operation `x * W`, + and we want to initialize `W` uniformly at random, we need to pick `W` from + + [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)] + + to keep the scale intact, where `dim = W.shape[0]` (the size of the input). + A similar calculation for convolutional networks gives an analogous result + with `dim` equal to the product of the first 3 dimensions. When + nonlinearities are present, we need to multiply this by a constant `factor`. + See (Sussillo et al., 2014) for deeper motivation, experiments + and the calculation of constants. In section 2.3 there, the constants were + numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15. + + Args: + factor: Float. A multiplicative factor by which the values will be scaled. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. 
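+
+  As a rough numeric sketch of the bound described above (the shape and
+  `factor` are arbitrary examples): for a `[256, 128]` weight with
+  `factor=1.0`, `dim = 256`, so values are drawn uniformly from
+  `[-sqrt(3/256), sqrt(3/256)]`, roughly `[-0.108, 0.108]`.
+
+  ```python
+  init = tf.compat.v1.uniform_unit_scaling_initializer(factor=1.0)
+  w = tf.Variable(init(shape=[256, 128], dtype=tf.float32))
+  ```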
+ References: + [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558) + ([pdf](http://arxiv.org/pdf/1412.6558.pdf)) + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + @deprecated(None, + "Use tf.initializers.variance_scaling instead with distribution=" + "uniform to get equivalent behavior.") + def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32): + self.factor = factor + self.seed = seed + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + scale_shape = shape + if partition_info is not None: + scale_shape = partition_info.full_shape + + input_size = 1.0 + # Estimating input size is not possible to do perfectly, but we try. + # The estimate, obtained by multiplying all dimensions but the last one, + # is the right thing for matrix multiply and convolutions (see above). + for dim in scale_shape[:-1]: + input_size *= float(dim) + # Avoid errors when initializing zero-size tensors. + input_size = max(input_size, 1.0) + max_val = math.sqrt(3 / input_size) * self.factor + return random_ops.random_uniform( + shape, -max_val, max_val, dtype, seed=self.seed) + + def get_config(self): + return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name} + + +@tf_export(v1=["initializers.variance_scaling", "variance_scaling_initializer"]) +@deprecation.deprecated_endpoints("initializers.variance_scaling", + "variance_scaling_initializer") +class VarianceScaling(Initializer): + """Initializer capable of adapting its scale to the shape of weights tensors. + + @compatibility(TF2) + Although it is a legacy `compat.v1` API, this symbol is compatible with eager + execution and `tf.function`. + + To switch to TF2 APIs, move to using either + `tf.initializers.variance_scaling` or `tf.keras.initializers.VarianceScaling` + (neither from `compat.v1`) and + pass the dtype when calling the initializer. + + #### Structural Mapping to TF2 + + Before: + + ```python + initializer = tf.compat.v1.variance_scaling_initializer( + scale=scale, + mode=mode, + distribution=distribution + seed=seed, + dtype=dtype) + + weight_one = tf.Variable(initializer(shape_one)) + weight_two = tf.Variable(initializer(shape_two)) + ``` + + After: + + ```python + initializer = tf.keras.initializers.VarianceScaling( + scale=scale, + mode=mode, + distribution=distribution + seed=seed) + + weight_one = tf.Variable(initializer(shape_one, dtype=dtype)) + weight_two = tf.Variable(initializer(shape_two, dtype=dtype)) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :----------------- | :-------------- | :------------------------- | + | `scale` | `scale` | No change to defaults | + | `mode` | `mode` | No change to defaults | + | `distribution` | `distribution` | No change to defaults. | + : : : 'normal' maps to 'truncated_normal' : + | `seed` | `seed` | | + | `dtype` | `dtype` | The TF2 api only takes it | + : : : as a `__call__` arg, not a constructor arg. 
: + | `partition_info` | - | (`__call__` arg in TF1) Not supported | + + @end_compatibility + + With `distribution="truncated_normal" or "untruncated_normal"`, + samples are drawn from a truncated/untruncated normal + distribution with a mean of zero and a standard deviation (after truncation, + if used) `stddev = sqrt(scale / n)` + where n is: + - number of input units in the weight tensor, if mode = "fan_in" + - number of output units, if mode = "fan_out" + - average of the numbers of input and output units, if mode = "fan_avg" + + With `distribution="uniform"`, samples are drawn from a uniform distribution + within [-limit, limit], with `limit = sqrt(3 * scale / n)`. + + Args: + scale: Scaling factor (positive float). + mode: One of "fan_in", "fan_out", "fan_avg". + distribution: Random distribution to use. One of "normal", "uniform". + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + + Raises: + ValueError: In case of an invalid value for the "scale", mode" or + "distribution" arguments. + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + @deprecated_arg_values( + None, + "`normal` is a deprecated alias for `truncated_normal`", + distribution="normal") + def __init__(self, + scale=1.0, + mode="fan_in", + distribution="truncated_normal", + seed=None, + dtype=dtypes.float32): + if scale <= 0.: + raise ValueError("Argument `scale` must be a positive float. Received: " + f"{scale}") + if mode not in {"fan_in", "fan_out", "fan_avg"}: + raise ValueError("Argument `mode` should be one of ('fan_in', 'fan_out', " + f"'fan_avg'). Received: {mode}") + distribution = distribution.lower() + if distribution not in { + "normal", "uniform", "truncated_normal", "untruncated_normal" + }: + raise ValueError("Argument `distribution` should be one of ('normal', " + "uniform', 'truncated_normal', 'untruncated_normal'). " + f"Received: {distribution}") + self.scale = scale + self.mode = mode + self.distribution = distribution + self.seed = seed + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + scale = self.scale + scale_shape = shape + if partition_info is not None: + scale_shape = partition_info.full_shape + fan_in, fan_out = _compute_fans(scale_shape) + if self.mode == "fan_in": + scale /= max(1., fan_in) + elif self.mode == "fan_out": + scale /= max(1., fan_out) + else: + scale /= max(1., (fan_in + fan_out) / 2.) + if self.distribution == "normal" or self.distribution == "truncated_normal": + # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) 
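+      # Dividing by this constant rescales the draw so that its
+      # post-truncation standard deviation is exactly sqrt(scale): a standard
+      # normal truncated to [-2, 2] has standard deviation ~0.8796, not 1.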
+ stddev = math.sqrt(scale) / .87962566103423978 + return random_ops.truncated_normal( + shape, 0.0, stddev, dtype, seed=self.seed) + elif self.distribution == "untruncated_normal": + stddev = math.sqrt(scale) + return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed) + else: + limit = math.sqrt(3.0 * scale) + return random_ops.random_uniform( + shape, -limit, limit, dtype, seed=self.seed) + + def get_config(self): + return { + "scale": self.scale, + "mode": self.mode, + "distribution": self.distribution, + "seed": self.seed, + "dtype": self.dtype.name + } + + +@tf_export(v1=["initializers.orthogonal", "orthogonal_initializer"]) +@deprecation.deprecated_endpoints("initializers.orthogonal", + "orthogonal_initializer") +class Orthogonal(Initializer): + """Initializer that generates an orthogonal matrix. + + If the shape of the tensor to initialize is two-dimensional, it is initialized + with an orthogonal matrix obtained from the QR decomposition of a matrix of + random numbers drawn from a normal distribution. + If the matrix has fewer rows than columns then the output will have orthogonal + rows. Otherwise, the output will have orthogonal columns. + + If the shape of the tensor to initialize is more than two-dimensional, + a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` + is initialized, where `n` is the length of the shape vector. + The matrix is subsequently reshaped to give a tensor of the desired shape. + + Args: + gain: multiplicative factor to apply to the orthogonal matrix + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) + ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): + self.gain = gain + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + self.seed = seed + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + # Check the shape + if len(shape) < 2: + raise ValueError("The tensor to initialize, specified by argument `shape`" + " must be at least two-dimensional. Received shape=" + f"{shape}") + # Flatten the input shape with the last dimension remaining + # its original shape so it works for conv2d + num_rows = 1 + for dim in shape[:-1]: + num_rows *= dim + num_rows = int(num_rows) + num_cols = int(shape[-1]) + if num_rows < num_cols: + flat_shape = (num_cols, num_rows) + else: + flat_shape = (num_rows, num_cols) + + # Generate a random matrix + a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed) + # Compute the qr factorization + q, r = gen_linalg_ops.qr(a, full_matrices=False) + # Make Q uniform + d = array_ops.diag_part(r) + q *= math_ops.sign(d) + if num_rows < num_cols: + q = array_ops.matrix_transpose(q) + return self.gain * array_ops.reshape(q, shape) + + def get_config(self): + return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} + + +# Note these haven't been ported to TF2.0. They are not currently visible and +# the tests are non trivial to port +class ConvolutionDeltaOrthogonal(Initializer): + """Initializer that generates a delta orthogonal kernel for ConvNets. 
+ + The shape of the tensor must have length 3, 4 or 5. The number of input + filters must not exceed the number of output filters. The center pixels of the + tensor form an orthogonal matrix. Other pixels are set to be zero. See + algorithm 2 in (Xiao et al., 2018). + + + Args: + gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. + The 2-norm of an input is multiplied by a factor of `gain` after applying + this convolution. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) + ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) + """ + + def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): + self.gain = gain + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + self.seed = seed + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + # Check the shape + if len(shape) < 3 or len(shape) > 5: + raise ValueError("The tensor to initialize, specified by argument `shape`" + " must be at least three-dimensional and at most " + f"five-dimensional. Received shape={shape}") + + if shape[-2] > shape[-1]: + raise ValueError(f"In_filters, specified by shape[-2]={shape[-2]} cannot " + "be greater than out_filters, specified by " + f"shape[-1]={shape[-1]}.") + + # Generate a random matrix + a = random_ops.random_normal([shape[-1], shape[-1]], + dtype=dtype, + seed=self.seed) + # Compute the qr factorization + q, r = gen_linalg_ops.qr(a, full_matrices=False) + # Make Q uniform + d = array_ops.diag_part(r) + q *= math_ops.sign(d) + q = q[:shape[-2], :] + q *= math_ops.cast(self.gain, dtype=dtype) + if len(shape) == 3: + weight = array_ops.scatter_nd([[(shape[0] - 1) // 2]], + array_ops.expand_dims(q, 0), shape) + elif len(shape) == 4: + weight = array_ops.scatter_nd([[(shape[0] - 1) // 2, + (shape[1] - 1) // 2]], + array_ops.expand_dims(q, 0), shape) + else: + weight = array_ops.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2, + (shape[2] - 1) // 2]], + array_ops.expand_dims(q, 0), shape) + return weight + + def get_config(self): + return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} + + +class ConvolutionOrthogonal(Initializer): + """Initializer that generates orthogonal kernel for ConvNets. + + Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution. + + Args: + gain: multiplicative factor to apply to the orthogonal matrix. Default is 1. + The 2-norm of an input is multiplied by a factor of `gain` after applying + this convolution. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. 
+ References: + [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) + ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) + """ + + def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): + self.gain = gain + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + self.seed = seed + + def __call__(self, shape, dtype=None, partition_info=None): + raise NotImplementedError + + def get_config(self): + return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} + + # Helper functions. + def _orthogonal_matrix(self, n): + """Construct an n x n orthogonal matrix. + + Args: + n: Dimension. + + Returns: + A n x n orthogonal matrix. + """ + a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed) + if self.seed: + self.seed += 1 + q, r = gen_linalg_ops.qr(a) + d = array_ops.diag_part(r) + # make q uniform + q *= math_ops.sign(d) + return q + + def _symmetric_projection(self, n): + """Compute a n x n symmetric projection matrix. + + Args: + n: Dimension. + + Returns: + A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T. + """ + q = self._orthogonal_matrix(n) + # randomly zeroing out some columns + mask = math_ops.cast( + random_ops.random_normal([n], seed=self.seed) > 0, self.dtype) + if self.seed: + self.seed += 1 + c = math_ops.multiply(q, mask) + return math_ops.matmul(c, array_ops.matrix_transpose(c)) + + +class ConvolutionOrthogonal2D(ConvolutionOrthogonal): + """Initializer that generates a 2D orthogonal kernel for ConvNets. + + The shape of the tensor must have length 4. The number of input + filters must not exceed the number of output filters. + The orthogonality(==isometry) is exact when the inputs are circular padded. + There are finite-width effects with non-circular padding (e.g. zero padding). + See algorithm 1 in (Xiao et al., 2018). + + Args: + gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. + This has the effect of scaling the output 2-norm by a factor of `gain`. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) + ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) + """ + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + if len(shape) != 4: + raise ValueError("The tensor to initialize, specified by argument `shape`" + f" must be four-dimensional. Received: {shape}") + + if shape[-2] > shape[-1]: + raise ValueError(f"In_filters, specified by shape[-2]={shape[-2]} cannot " + "be greater than out_filters, specified by " + f"shape[-1]={shape[-1]}.") + + if shape[0] != shape[1]: + raise ValueError(f"Kernel sizes, specified by shape[0]={shape[0]} and " + f"shape[1]={shape[1]} must be equal.") + + kernel = self._orthogonal_kernel(shape[0], shape[2], shape[3]) + kernel *= math_ops.cast(self.gain, dtype=dtype) + return kernel + + def _dict_to_tensor(self, x, k1, k2): + """Convert a dictionary to a tensor. + + Args: + x: A k1 * k2 dictionary. + k1: First dimension of x. + k2: Second dimension of x. + + Returns: + A k1 * k2 tensor. + """ + + return array_ops_stack.stack([ + array_ops_stack.stack([x[i, j] for j in range(k2)]) for i in range(k1)]) + + def _block_orth(self, p1, p2): + """Construct a 2 x 2 kernel. 
+ + Used to construct orthgonal kernel. + + Args: + p1: A symmetric projection matrix. + p2: A symmetric projection matrix. + + Returns: + A 2 x 2 kernel [[p1p2, p1(1-p2)], + [(1-p1)p2, (1-p1)(1-p2)]]. + Raises: + ValueError: If the dimensions of p1 and p2 are different. + """ + if p1.shape.as_list() != p2.shape.as_list(): + raise ValueError("The dimension of the matrices must be the same. " + f"Received p1.shape={p1.shape} and p2.shape={p2.shape}.") + n = p1.shape.as_list()[0] + kernel2x2 = {} + eye = linalg_ops_impl.eye(n, dtype=self.dtype) + kernel2x2[0, 0] = math_ops.matmul(p1, p2) + kernel2x2[0, 1] = math_ops.matmul(p1, (eye - p2)) + kernel2x2[1, 0] = math_ops.matmul((eye - p1), p2) + kernel2x2[1, 1] = math_ops.matmul((eye - p1), (eye - p2)) + + return kernel2x2 + + def _matrix_conv(self, m1, m2): + """Matrix convolution. + + Args: + m1: A k x k dictionary, each element is a n x n matrix. + m2: A l x l dictionary, each element is a n x n matrix. + + Returns: + (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix. + Raises: + ValueError: if the entries of m1 and m2 are of different dimensions. + """ + + n = (m1[0, 0]).shape.as_list()[0] + if n != (m2[0, 0]).shape.as_list()[0]: + raise ValueError("The entries in matrices m1 and m2 must have the same " + f"dimensions. Received m1[0, 0].shape={m1[0, 0].shape} " + f"and m2[0, 0].shape={m2[0, 0].shape}.") + k = int(np.sqrt(len(m1))) + l = int(np.sqrt(len(m2))) + result = {} + size = k + l - 1 + # Compute matrix convolution between m1 and m2. + for i in range(size): + for j in range(size): + result[i, j] = array_ops.zeros([n, n], self.dtype) + for index1 in range(min(k, i + 1)): + for index2 in range(min(k, j + 1)): + if (i - index1) < l and (j - index2) < l: + result[i, j] += math_ops.matmul(m1[index1, index2], + m2[i - index1, j - index2]) + return result + + def _orthogonal_kernel(self, ksize, cin, cout): + """Construct orthogonal kernel for convolution. + + Args: + ksize: Kernel size. + cin: Number of input channels. + cout: Number of output channels. + + Returns: + An [ksize, ksize, cin, cout] orthogonal kernel. + Raises: + ValueError: If cin > cout. + """ + if cin > cout: + raise ValueError(f"The number of input channels (cin={cin}) cannot exceed" + f" the number of output channels (cout={cout}).") + orth = self._orthogonal_matrix(cout)[0:cin, :] + if ksize == 1: + return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0) + + p = self._block_orth( + self._symmetric_projection(cout), self._symmetric_projection(cout)) + for _ in range(ksize - 2): + temp = self._block_orth( + self._symmetric_projection(cout), self._symmetric_projection(cout)) + p = self._matrix_conv(p, temp) + for i in range(ksize): + for j in range(ksize): + p[i, j] = math_ops.matmul(orth, p[i, j]) + + return self._dict_to_tensor(p, ksize, ksize) + + +class ConvolutionOrthogonal1D(ConvolutionOrthogonal): + """Initializer that generates a 1D orthogonal kernel for ConvNets. + + The shape of the tensor must have length 3. The number of input + filters must not exceed the number of output filters. + The orthogonality(==isometry) is exact when the inputs are circular padded. + There are finite-width effects with non-circular padding (e.g. zero padding). + See algorithm 1 in (Xiao et al., 2018). + + Args: + gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. + The 2-norm of an input is multiplied by a factor of `gain` after applying + this convolution. + seed: A Python integer. Used to create random seeds. 
See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) + ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) + """ + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + if len(shape) != 3: + raise ValueError("The tensor to initialize, specified by argument `shape`" + f" must be three-dimensional. Received shape={shape}") + + if shape[-2] > shape[-1]: + raise ValueError(f"In_filters, specified by shape[-2]={shape[-2]} cannot " + "be greater than out_filters, specified by " + f"shape[-1]={shape[-1]}.") + + kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1]) + kernel *= math_ops.cast(self.gain, dtype=dtype) + return kernel + + def _dict_to_tensor(self, x, k): + """Convert a dictionary to a tensor. + + Args: + x: A dictionary of length k. + k: Dimension of x. + + Returns: + A tensor with the same dimension. + """ + + return array_ops_stack.stack([x[i] for i in range(k)]) + + def _block_orth(self, projection_matrix): + """Construct a kernel. + + Used to construct orthgonal kernel. + + Args: + projection_matrix: A symmetric projection matrix of size n x n. + + Returns: + [projection_matrix, (1 - projection_matrix)]. + """ + n = projection_matrix.shape.as_list()[0] + kernel = {} + eye = linalg_ops_impl.eye(n, dtype=self.dtype) + kernel[0] = projection_matrix + kernel[1] = eye - projection_matrix + return kernel + + def _matrix_conv(self, m1, m2): + """Matrix convolution. + + Args: + m1: A dictionary of length k, each element is a n x n matrix. + m2: A dictionary of length l, each element is a n x n matrix. + + Returns: + (k + l - 1) dictionary each element is a n x n matrix. + Raises: + ValueError: Ff the entries of m1 and m2 are of different dimensions. + """ + + n = (m1[0]).shape.as_list()[0] + if n != (m2[0]).shape.as_list()[0]: + raise ValueError("The entries in matrices m1 and m2 must have the same " + f"dimensions. Received m1[0].shape={m1[0].shape} " + f"and m2[0].shape={m2[0].shape}.") + k = len(m1) + l = len(m2) + result = {} + size = k + l - 1 + # Compute matrix convolution between m1 and m2. + for i in range(size): + result[i] = array_ops.zeros([n, n], self.dtype) + for index in range(min(k, i + 1)): + if (i - index) < l: + result[i] += math_ops.matmul(m1[index], m2[i - index]) + return result + + def _orthogonal_kernel(self, ksize, cin, cout): + """Construct orthogonal kernel for convolution. + + Args: + ksize: Kernel size. + cin: Number of input channels. + cout: Number of output channels. + + Returns: + An [ksize, ksize, cin, cout] orthogonal kernel. + Raises: + ValueError: If cin > cout. + """ + if cin > cout: + raise ValueError(f"The number of input channels (cin={cin}) cannot exceed" + f" the number of output channels (cout={cout}).") + orth = self._orthogonal_matrix(cout)[0:cin, :] + if ksize == 1: + return array_ops.expand_dims(orth, 0) + + p = self._block_orth(self._symmetric_projection(cout)) + for _ in range(ksize - 2): + temp = self._block_orth(self._symmetric_projection(cout)) + p = self._matrix_conv(p, temp) + for i in range(ksize): + p[i] = math_ops.matmul(orth, p[i]) + + return self._dict_to_tensor(p, ksize) + + +class ConvolutionOrthogonal3D(ConvolutionOrthogonal): + """Initializer that generates a 3D orthogonal kernel for ConvNets. 
+ + The shape of the tensor must have length 5. The number of input + filters must not exceed the number of output filters. + The orthogonality(==isometry) is exact when the inputs are circular padded. + There are finite-width effects with non-circular padding (e.g. zero padding). + See algorithm 1 (Xiao et al., 2018). + + Args: + gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. + The 2-norm of an input is multiplied by a factor of `gain` after applying + this convolution. + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) + ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) + """ + + def __call__(self, shape, dtype=None, partition_info=None): + if dtype is None: + dtype = self.dtype + if len(shape) != 5: + raise ValueError("The tensor to initialize, specified by argument `shape`" + f" must be five-dimensional. Received shape={shape}") + + if shape[-2] > shape[-1]: + raise ValueError(f"In_filters, specified by shape[-2]={shape[-2]} cannot " + "be greater than out_filters, specified by " + f"shape[-1]={shape[-1]}.") + + if shape[0] != shape[1] or shape[0] != shape[2]: + raise ValueError(f"Kernel sizes, specified by shape[0]={shape[0]}, " + f"shape[1]={shape[1]} and shape[2]={shape[2]} must be " + "equal.") + + kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1]) + kernel *= math_ops.cast(self.gain, dtype=dtype) + return kernel + + def _dict_to_tensor(self, x, k1, k2, k3): + """Convert a dictionary to a tensor. + + Args: + x: A k1 * k2 dictionary. + k1: First dimension of x. + k2: Second dimension of x. + k3: Third dimension of x. + + Returns: + A k1 * k2 * k3 tensor. + """ + + return array_ops_stack.stack([array_ops_stack.stack( + [array_ops_stack.stack([x[i, j, k] for k in range(k3)]) + for j in range(k2)]) for i in range(k1)]) + + def _block_orth(self, p1, p2, p3): + """Construct a 3 x 3 kernel. + + Used to construct orthgonal kernel. + + Args: + p1: A symmetric projection matrix. + p2: A symmetric projection matrix. + p3: A symmetric projection matrix. + + Returns: + A 2 x 2 x 2 kernel. + Raises: + ValueError: If the dimensions of p1, p2 and p3 are different. + """ + p1_shape = p1.shape.as_list() + if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list(): + raise ValueError("The dimension of the matrices must be the same. " + f"Received p1.shape={p1.shape}, p2.shape={p2.shape} and" + f" p3.shape={p3.shape}.") + n = p1_shape[0] + eye = linalg_ops_impl.eye(n, dtype=self.dtype) + kernel2x2x2 = {} + + def matmul(p1, p2, p3): + return math_ops.matmul(math_ops.matmul(p1, p2), p3) + + def cast(i, p): + """Return p or (1-p).""" + return i * p + (1 - i) * (eye - p) + + for i in [0, 1]: + for j in [0, 1]: + for k in [0, 1]: + kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3)) + return kernel2x2x2 + + def _matrix_conv(self, m1, m2): + """Matrix convolution. + + Args: + m1: is a k x k x k dictionary, each element is a n x n matrix. + m2: is a l x l x l dictionary, each element is a n x n matrix. + + Returns: + (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each + element is a n x n matrix. + Raises: + ValueError: if the entries of m1 and m2 are of different dimensions. 
+ """ + + n = (m1[0, 0, 0]).shape.as_list()[0] + if n != (m2[0, 0, 0]).shape.as_list()[0]: + raise ValueError("The entries in matrices m1 and m2 must have the same " + "dimensions. Received m1[0, 0, 0].shape=" + f"{m1[0, 0, 0].shape} and m2[0, 0, 0].shape=" + f"{m2[0, 0, 0].shape}.") + k = int(np.cbrt(len(m1))) + l = int(np.cbrt(len(m2))) + result = {} + size = k + l - 1 + # Compute matrix convolution between m1 and m2. + for i in range(size): + for j in range(size): + for r in range(size): + result[i, j, r] = array_ops.zeros([n, n], self.dtype) + for index1 in range(min(k, i + 1)): + for index2 in range(min(k, j + 1)): + for index3 in range(min(k, r + 1)): + if (i - index1) < l and (j - index2) < l and (r - index3) < l: + result[i, j, r] += math_ops.matmul( + m1[index1, index2, index3], + m2[i - index1, j - index2, r - index3]) + return result + + def _orthogonal_kernel(self, ksize, cin, cout): + """Construct orthogonal kernel for convolution. + + Args: + ksize: Kernel size. + cin: Number of input channels. + cout: Number of output channels. + + Returns: + An [ksize, ksize, ksize, cin, cout] orthogonal kernel. + Raises: + ValueError: If cin > cout. + """ + if cin > cout: + raise ValueError(f"The number of input channels (cin={cin}) cannot exceed" + f" the number of output channels (cout={cout}).") + orth = self._orthogonal_matrix(cout)[0:cin, :] + if ksize == 1: + return array_ops.expand_dims( + array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0) + + p = self._block_orth( + self._symmetric_projection(cout), self._symmetric_projection(cout), + self._symmetric_projection(cout)) + for _ in range(ksize - 2): + temp = self._block_orth( + self._symmetric_projection(cout), self._symmetric_projection(cout), + self._symmetric_projection(cout)) + p = self._matrix_conv(p, temp) + for i in range(ksize): + for j in range(ksize): + for k in range(ksize): + p[i, j, k] = math_ops.matmul(orth, p[i, j, k]) + + return self._dict_to_tensor(p, ksize, ksize, ksize) + + +@tf_export(v1=["initializers.identity"]) +@deprecation.deprecated_endpoints("initializers.identity") +class Identity(Initializer): + """Initializer that generates the identity matrix. + + Only use for 2D matrices. + + Args: + gain: Multiplicative factor to apply to the identity matrix. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, gain=1.0, dtype=dtypes.float32): + self.gain = gain + self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) + + def __call__(self, shape, dtype=None, partition_info=None): + full_shape = shape if partition_info is None else partition_info.full_shape + if len(full_shape) != 2: + raise ValueError("The tensor to initialize, specified by argument `shape`" + " must be at least two-dimensional. 
Received shape=" + f"{shape}") + if dtype is None: + dtype = self.dtype + if isinstance(full_shape, tensor_shape.TensorShape): + full_shape = full_shape.as_list() + initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype) + if partition_info is not None: + initializer = array_ops.slice(initializer, partition_info.var_offset, + shape) + return self.gain * initializer + + def get_config(self): + return {"gain": self.gain, "dtype": self.dtype.name} + + +@tf_export(v1=["glorot_uniform_initializer", "initializers.glorot_uniform"]) +@deprecation.deprecated_endpoints("glorot_uniform_initializer", + "initializers.glorot_uniform") +class GlorotUniform(VarianceScaling): + """The Glorot uniform initializer, also called Xavier uniform initializer. + + It draws samples from a uniform distribution within [-limit, limit] + where `limit` is `sqrt(6 / (fan_in + fan_out))` + where `fan_in` is the number of input units in the weight tensor + and `fan_out` is the number of output units in the weight tensor. + + Args: + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) + ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, seed=None, dtype=dtypes.float32): + super(GlorotUniform, self).__init__( + scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) + + def get_config(self): + return {"seed": self.seed, "dtype": self.dtype.name} + + +@tf_export(v1=["glorot_normal_initializer", "initializers.glorot_normal"]) +@deprecation.deprecated_endpoints("glorot_normal_initializer", + "initializers.glorot_normal") +class GlorotNormal(VarianceScaling): + """The Glorot normal initializer, also called Xavier normal initializer. + + It draws samples from a truncated normal distribution centered on 0 + with standard deviation (after truncation) given by + `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number + of input units in the weight tensor and `fan_out` is the number of + output units in the weight tensor. + + Args: + seed: A Python integer. Used to create random seeds. See + `tf.compat.v1.set_random_seed` for behavior. + dtype: Default data type, used if no `dtype` argument is provided when + calling the initializer. Only floating point types are supported. + References: + [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) + ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) + """ + + @deprecated_args(None, + "Call initializer instance with the dtype argument instead " + "of passing it to the constructor", "dtype") + def __init__(self, seed=None, dtype=dtypes.float32): + super(GlorotNormal, self).__init__( + scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed) + + def get_config(self): + return {"seed": self.seed, "dtype": self.dtype.name} + + +# Aliases. 
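+# Each alias below simply rebinds the corresponding class above, so for
+# example
+#   init = random_uniform_initializer(minval=-0.05, maxval=0.05, seed=1)
+# is interchangeable with `RandomUniform(minval=-0.05, maxval=0.05, seed=1)`,
+# and either instance can be passed as the `initializer` argument of
+# `tf.compat.v1.get_variable`.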
+ +# pylint: disable=invalid-name +zeros_initializer = Zeros +ones_initializer = Ones +constant_initializer = Constant +random_uniform_initializer = RandomUniform +random_normal_initializer = RandomNormal +truncated_normal_initializer = TruncatedNormal +uniform_unit_scaling_initializer = UniformUnitScaling +variance_scaling_initializer = VarianceScaling +glorot_uniform_initializer = GlorotUniform +glorot_normal_initializer = GlorotNormal +orthogonal_initializer = Orthogonal +identity_initializer = Identity +convolutional_delta_orthogonal = ConvolutionDeltaOrthogonal +convolutional_orthogonal_1d = ConvolutionOrthogonal1D +convolutional_orthogonal_2d = ConvolutionOrthogonal2D +convolutional_orthogonal_3d = ConvolutionOrthogonal3D +# pylint: enable=invalid-name + + +@tf_export(v1=["initializers.lecun_normal"]) +def lecun_normal(seed=None): + """LeCun normal initializer. + + It draws samples from a truncated normal distribution centered on 0 + with standard deviation (after truncation) given by + `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of + input units in the weight tensor. + + Args: + seed: A Python integer. Used to seed the random generator. + + Returns: + An initializer. + + References: + - Self-Normalizing Neural Networks, + [Klambauer et al., + 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) + # pylint: disable=line-too-long + ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) + - Efficient Backprop, + [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) + """ + return VarianceScaling( + scale=1., mode="fan_in", distribution="truncated_normal", seed=seed) + + +@tf_export(v1=["initializers.lecun_uniform"]) +def lecun_uniform(seed=None): + """LeCun uniform initializer. + + It draws samples from a uniform distribution within [-limit, limit] + where `limit` is `sqrt(3 / fan_in)` + where `fan_in` is the number of input units in the weight tensor. + + Args: + seed: A Python integer. Used to seed the random generator. + + Returns: + An initializer. + + References: + - Self-Normalizing Neural Networks, + [Klambauer et al., + 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) + # pylint: disable=line-too-long + ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) + - Efficient Backprop, + [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) + """ + return VarianceScaling( + scale=1., mode="fan_in", distribution="uniform", seed=seed) + + +@tf_export(v1=["initializers.he_normal"]) +def he_normal(seed=None): + """He normal initializer. + + It draws samples from a truncated normal distribution centered on 0 + with standard deviation (after truncation) given by + `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of + input units in the weight tensor. + + Args: + seed: A Python integer. Used to seed the random generator. + + Returns: + An initializer. + + References: + [He et al., 2015] + (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) + # pylint: disable=line-too-long + ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) + """ + return VarianceScaling( + scale=2., mode="fan_in", distribution="truncated_normal", seed=seed) + + +@tf_export(v1=["initializers.he_uniform"]) +def he_uniform(seed=None): + """He uniform variance scaling initializer. 
+ + It draws samples from a uniform distribution within [-limit, limit] + where `limit` is `sqrt(6 / fan_in)` + where `fan_in` is the number of input units in the weight tensor. + + Args: + seed: A Python integer. Used to seed the random generator. + + Returns: + An initializer. + + References: + [He et al., 2015] + (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) + # pylint: disable=line-too-long + ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) + """ + return VarianceScaling( + scale=2., mode="fan_in", distribution="uniform", seed=seed) + + +# Utility functions. + + +def _compute_fans(shape): + """Computes the number of input and output units for a weight shape. + + Args: + shape: Integer shape tuple or TF tensor shape. + + Returns: + A tuple of integer scalars (fan_in, fan_out). + """ + if len(shape) < 1: # Just to avoid errors for constants. + fan_in = fan_out = 1 + elif len(shape) == 1: + fan_in = fan_out = shape[0] + elif len(shape) == 2: + fan_in = shape[0] + fan_out = shape[1] + else: + # Assuming convolution kernels (2D, 3D, or more). + # kernel shape: (..., input_depth, depth) + receptive_field_size = 1 + for dim in shape[:-2]: + receptive_field_size *= dim + fan_in = shape[-2] * receptive_field_size + fan_out = shape[-1] * receptive_field_size + return int(fan_in), int(fan_out) + + +def _assert_float_dtype(dtype): + """Validate and return floating point type based on `dtype`. + + `dtype` must be a floating point type. + + Args: + dtype: The data type to validate. + + Returns: + Validated type. + + Raises: + ValueError: if `dtype` is not a floating point type. + """ + if not dtype.is_floating: + raise ValueError("Argument `dtype` is expected to be floating point. " + f"Received: {dtype}.") + return dtype diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/resource_variable_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/resource_variable_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5011178cef7c13c7cdc43c0abaf3bf188f79f9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/resource_variable_ops.py @@ -0,0 +1,2801 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Ops to use variables as resources.""" + +# pylint: disable=g-bad-name +import contextlib +import functools +import weakref + +import numpy as np + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import variable_pb2 +from tensorflow.core.function import trace_type +from tensorflow.core.protobuf import struct_pb2 +from tensorflow.python.checkpoint import tensor_callable +from tensorflow.python.client import pywrap_tf_session +from tensorflow.python.compat import compat as forward_compat +from tensorflow.python.eager import context +from tensorflow.python.eager import record +from tensorflow.python.eager import tape +from tensorflow.python.framework import auto_control_deps_utils as acd +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import cpp_shape_inference_pb2 +from tensorflow.python.framework import device as pydev +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_module +from tensorflow.python.framework import tensor_conversion_registry +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_resource_variable_ops +from tensorflow.python.ops import gen_state_ops +from tensorflow.python.ops import handle_data_util +from tensorflow.python.ops import state_ops +from tensorflow.python.ops import variables +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_resource_variable_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.trackable import base as trackable +from tensorflow.python.types import core +from tensorflow.python.util import compat +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.tf_export import tf_export + +acd.register_read_only_resource_op("ReadVariableOp") +acd.register_read_only_resource_op("VariableShape") +acd.register_read_only_resource_op("ResourceGather") +acd.register_read_only_resource_op("ResourceGatherNd") +acd.register_read_only_resource_op("_ReadVariablesOp") + +# TODO(allenl): Remove this alias and migrate callers. +get_resource_handle_data = handle_data_util.get_resource_handle_data + + +def get_eager_safe_handle_data(handle): + """Get the data handle from the Tensor `handle`.""" + assert isinstance(handle, tensor_module.Tensor) + + if isinstance(handle, ops.EagerTensor): + return handle._handle_data # pylint: disable=protected-access + else: + return get_resource_handle_data(handle) + + +def _set_handle_shapes_and_types(tensor, handle_data, graph_mode): + """Sets the shape inference result HandleData on tensor. + + Args: + tensor: A `Tensor` or `EagerTensor`. + handle_data: A `CppShapeInferenceResult.HandleData`. + graph_mode: A python bool. + """ + tensor._handle_data = handle_data # pylint: disable=protected-access + if not graph_mode: + return + + # Not an EagerTensor, so a graph tensor. 
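+  # The code below unpacks the (shape, dtype) pairs from the HandleData proto,
+  # flattens them into rank/dimension lists, and registers them with the
+  # graph's C API so that shape inference on ops reading this resource handle
+  # can see the variable's dtype and shape.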
+ shapes, types = zip( + *[(pair.shape, pair.dtype) for pair in handle_data.shape_and_type]) + ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes] + shapes = [ + [d.size for d in s.dim] # pylint: disable=g-complex-comprehension + if not s.unknown_rank else None for s in shapes + ] + with tensor._op.graph._c_graph.get() as c_graph: # pylint: disable=protected-access + pywrap_tf_session.TF_GraphSetOutputHandleShapesAndTypes_wrapper( + c_graph, + tensor._as_tf_output(), # pylint: disable=protected-access + shapes, + ranks, + types) + + +def _combine_handle_data(handle, initial_value): + """Concats HandleData from tensors `handle` and `initial_value`. + + Args: + handle: A `Tensor` of dtype `resource`. + initial_value: A `Tensor`. + + Returns: + A `CppShapeInferenceResult.HandleData`. If `initial_value` has dtype + `variant`, the `HandleData` contains the concatenation of the shape_and_type + from both `handle` and `initial_value`. + + Raises: + RuntimeError: If handle, which was returned by VarHandleOp, either has + no handle data, or its len(handle_data.shape_and_type) != 1. + """ + assert handle.dtype == dtypes.resource + + variable_handle_data = get_eager_safe_handle_data(handle) + + if initial_value.dtype != dtypes.variant: + return variable_handle_data + + extra_handle_data = get_eager_safe_handle_data(initial_value) + if extra_handle_data is not None and extra_handle_data.is_set: + if (variable_handle_data is None or not variable_handle_data.is_set or + len(variable_handle_data.shape_and_type) != 1): + raise RuntimeError( + "Expected VarHandleOp to return a length==1 shape_and_type, " + f"but saw: '{variable_handle_data}'") + variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type) + return variable_handle_data + + +def _variable_handle_from_shape_and_dtype(shape, + dtype, + shared_name, + name, + graph_mode, + initial_value=None): + """Create a variable handle, copying in handle data from `initial_value`.""" + container = ops.get_default_graph()._container # pylint: disable=protected-access + if container is None: + container = "" + shape = tensor_shape.as_shape(shape) + dtype = dtypes.as_dtype(dtype) + if not graph_mode: + if shared_name is not None: + raise errors.InternalError( + node_def=None, + op=None, + message="Using an explicit shared_name is " + "not allowed when executing eagerly.") + shared_name = context.anonymous_name() + + handle = gen_resource_variable_ops.var_handle_op( + shape=shape, + dtype=dtype, + shared_name=shared_name, + debug_name=name, + name=name, + container=container) + if initial_value is None: + initial_value = handle + if graph_mode: + full_handle_data = _combine_handle_data(handle, initial_value) + _set_handle_shapes_and_types(handle, full_handle_data, graph_mode) + return handle + else: + handle_data = handle_data_util.create_handle_data(shape, dtype) + if initial_value is not None and initial_value.dtype == dtypes.variant: + extra_handle_data = get_eager_safe_handle_data(initial_value) + if extra_handle_data is not None and extra_handle_data.is_set: + if (not handle_data.is_set or len(handle_data.shape_and_type) != 1): + raise RuntimeError( + "Expected VarHandleOp to return a length==1 shape_and_type, " + f"but saw: '{handle_data}'") + handle_data.shape_and_type.extend(extra_handle_data.shape_and_type) + + _set_handle_shapes_and_types(handle, handle_data, graph_mode) + return handle + + +def eager_safe_variable_handle(initial_value, shape, shared_name, name, + graph_mode): + """Creates a variable handle with 
information to do shape inference. + + The dtype is read from `initial_value` and stored in the returned + resource tensor's handle data. + + If `initial_value.dtype == tf.variant`, we additionally extract the handle + data (if any) from `initial_value` and append it to the `handle_data`. + In this case, the returned tensor's handle data is in the form + + ``` + is_set: true + shape_and_type { + shape { + // initial_value.shape + } + dtype: DT_VARIANT + } + shape_and_type { + // handle_data(initial_value).shape_and_type[0] + } + shape_and_type { + // handle_data(initial_value).shape_and_type[1] + } + ... + ``` + + Ops that read from this tensor, such as `ReadVariableOp` and + `AssignVariableOp`, know that `handle_data(handle).shape_and_type[1:]` + correspond to the handle data of the variant(s) stored in the Variable. + + Args: + initial_value: A `Tensor`. + shape: The shape of the handle data. Can be `TensorShape(None)` (i.e. + unknown shape). + shared_name: A string. + name: A string. + graph_mode: A python bool. + + Returns: + The handle, a `Tensor` of type `resource`. + """ + dtype = initial_value.dtype.base_dtype + return _variable_handle_from_shape_and_dtype(shape, dtype, shared_name, name, + graph_mode, initial_value) + + +@contextlib.contextmanager +def _handle_graph(handle): + # Note: might have an eager tensor but not be executing eagerly when building + # functions. + if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor) or + ops.has_default_graph()): + yield + else: + with handle.graph.as_default(): + yield + + +class EagerResourceDeleter: + """An object which cleans up a resource handle. + + An alternative to defining a __del__ method on an object. The intended use is + that ResourceVariables or other objects with resource handles will maintain a + single reference to this object. When the parent object is collected, this + object will be too. Even if the parent object is part of a reference cycle, + the cycle will be collectable. + """ + + __slots__ = ["_handle", "_handle_device", "_context"] + + def __init__(self, handle, handle_device): + if not isinstance(handle, tensor_module.Tensor): + raise ValueError( + (f"Passed handle={handle} to EagerResourceDeleter. Was expecting " + f"the handle to be a `tf.Tensor`.")) + self._handle = handle + self._handle_device = handle_device + # This is held since the __del__ function runs an op, and if the context() + # is collected before this object, there will be a segfault when running the + # op. + self._context = context.context() + + def __del__(self): + # Resources follow object-identity when executing eagerly, so it is safe to + # delete the resource we have a handle to. + try: + # A packed EagerTensor doesn't own any resource. + if isinstance(self._handle, ops.EagerTensor) and self._handle.is_packed: + return + # This resource was created in eager mode. However, this destructor may be + # running in graph mode (especially during unit tests). To clean up + # successfully, we switch back into eager mode temporarily. + with context.eager_mode(): + with ops.device(self._handle_device): + gen_resource_variable_ops.destroy_resource_op( + self._handle, ignore_lookup_error=True) + except TypeError: + # Suppress some exceptions, mainly for the case when we're running on + # module deletion. Things that can go wrong include the context module + # already being unloaded, self._handle._handle_data no longer being + # valid, and so on. 
Printing warnings in these cases is silly + # (exceptions raised from __del__ are printed as warnings to stderr). + pass # 'NoneType' object is not callable when the handle has been + # partially unloaded. + except AttributeError: + pass # 'NoneType' object has no attribute 'eager_mode' when context has + # been unloaded. Will catch other module unloads as well. + + +def shape_safe_assign_variable_handle(handle, shape, value, name=None): + """Helper that checks shape compatibility and assigns variable.""" + with _handle_graph(handle): + value_tensor = ops.convert_to_tensor(value) + shape.assert_is_compatible_with(value_tensor.shape) + return gen_resource_variable_ops.assign_variable_op( + handle, value_tensor, name=name) + + +def _maybe_set_handle_data(dtype, handle, tensor): + if dtype == dtypes.variant: + # For DT_VARIANT types, the handle's shape_and_type[1:] stores the + # variant's handle data. Extract it. + handle_data = get_eager_safe_handle_data(handle) + if handle_data.is_set and len(handle_data.shape_and_type) > 1: + tensor._handle_data = ( # pylint: disable=protected-access + cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData( + is_set=True, shape_and_type=handle_data.shape_and_type[1:])) + + +def variable_accessed(variable): + """Records that `variable` was accessed for the tape and FuncGraph.""" + if hasattr(ops.get_default_graph(), "watch_variable"): + ops.get_default_graph().watch_variable(variable) + if variable.trainable: + tape.variable_accessed(variable) + + +def default_variable_creator_v2(next_creator=None, **kwargs): + """Default variable creator.""" + assert next_creator is None + initial_value = kwargs.get("initial_value", None) + trainable = kwargs.get("trainable", None) + validate_shape = kwargs.get("validate_shape", True) + caching_device = kwargs.get("caching_device", None) + name = kwargs.get("name", None) + variable_def = kwargs.get("variable_def", None) + dtype = kwargs.get("dtype", None) + import_scope = kwargs.get("import_scope", None) + constraint = kwargs.get("constraint", None) + distribute_strategy = kwargs.get("distribute_strategy", None) + synchronization = kwargs.get("synchronization", None) + aggregation = kwargs.get("aggregation", None) + shape = kwargs.get("shape", None) + experimental_enable_variable_lifting = kwargs.get( + "experimental_enable_variable_lifting", None) + + return ResourceVariable( + initial_value=initial_value, + trainable=trainable, + validate_shape=validate_shape, + caching_device=caching_device, + name=name, + dtype=dtype, + constraint=constraint, + variable_def=variable_def, + import_scope=import_scope, + distribute_strategy=distribute_strategy, + synchronization=synchronization, + aggregation=aggregation, + shape=shape, + experimental_enable_variable_lifting=experimental_enable_variable_lifting, + ) + + +class BaseResourceVariable(variables.Variable, core.Tensor): + """A python variable from an existing handle.""" + + # TODO(wangpeng): Deprecate `constraint` when callers no long pass it in. + def __init__( # pylint: disable=super-init-not-called + self, + trainable=None, + shape=None, + dtype=None, + handle=None, + constraint=None, + synchronization=None, + aggregation=None, + distribute_strategy=None, + name=None, + unique_id=None, + handle_name=None, + graph_element=None, + initial_value=None, + initializer_op=None, + is_initialized_op=None, + cached_value=None, + save_slice_info=None, + caching_device=None, + in_graph_mode=None, + validate_shape=True, + **unused_kwargs): + """Creates a variable from a handle. 
+ + Args: + trainable: If `True`, GradientTapes automatically watch uses of this + Variable. + shape: The variable's shape. This shape can be set to tf.TensorShape(None) + in order to assign values of different shapes to this variable. + Otherwise (i.e. if the shape is fully determined), it will trigger run + time checks to ensure that each assignment is of the same shape. + dtype: The variable's dtype. + handle: The variable's handle. + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + synchronization: Indicates when a distributed variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + distribute_strategy: The distribution strategy this variable was created + under. + name: The name for this variable. + unique_id: Internal. Unique ID for this variable's handle. + handle_name: The name for the variable's handle. + graph_element: Optional, required only in session.run-mode. Pre-created + tensor which reads this variable's value. + initial_value: Optional. Variable's initial value. + initializer_op: Operation which assigns the variable's initial value. + is_initialized_op: Pre-created operation to check whether this variable is + initialized. + cached_value: Pre-created operation to read this variable in a specific + device. + save_slice_info: Metadata for variable partitioning. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + in_graph_mode: whether we are executing in TF1 graph mode. If None, will + detect within the function. This is to avoid repeated init_scope() + context entrances which can add up. + validate_shape: If `False`, allows the variable to be initialized with a + value of unknown shape. If `True`, the default, the shape of + `initial_value` must be known.
+ """ + if in_graph_mode is None: + with ops.init_scope(): + self._in_graph_mode = not context.executing_eagerly() + else: + self._in_graph_mode = in_graph_mode + synchronization, aggregation, trainable = ( + variables.validate_synchronization_aggregation_trainable( + synchronization, aggregation, trainable, name)) + self._trainable = trainable + self._synchronization = synchronization + self._aggregation = aggregation + self._save_slice_info = save_slice_info + self._initial_value = initial_value + self._initializer_op = initializer_op + self._is_initialized_op = is_initialized_op + self._graph_element = graph_element + self._caching_device = caching_device + self._cached_value = cached_value + self._distribute_strategy = distribute_strategy + # Store the graph key so optimizers know how to only retrieve variables from + # this graph. Guaranteed to be the same as the eager graph_key. + self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access + self._shape = tensor_shape.as_shape(shape) + self._dtype = dtypes.as_dtype(dtype) + self._handle = handle + self._unique_id = unique_id + if handle_name is None: + self._handle_name = "Variable:0" + else: + self._handle_name = handle_name + ":0" + self._constraint = constraint + self._cached_shape_as_list = None + self._validate_shape = validate_shape + + def __repr__(self): + if context.executing_eagerly() and not self._in_graph_mode: + # If we cannot read the value for any reason (e.g. variable uninitialized + # during tf.function tracing), still produce a __repr__. Note that for + # async eager, errors due to uninitialized variables will raise in + # ops.value_text when the handle is resolved, so we need to keep that + # under the try...except if we want to suppress them. + try: + with ops.device(self.device): + value_text = ops.value_text(self.read_value(), is_repr=True) + except: # pylint: disable=bare-except + value_text = "numpy=" + + return "" % ( + self.name, self.get_shape(), self.dtype.name, value_text) + else: + return "" % ( + self.name, self.get_shape(), self.dtype.name) + + def __tf_tracing_type__(self, signature_context): + alias_id = signature_context.alias_global_id(self._handle._id) # pylint:disable=protected-access + # TODO(xjun): Create variable placeholders directly from VariableSpec + # without using original values. + signature_context.add_placeholder(alias_id, self) + return VariableSpec(shape=self.shape, + dtype=self.dtype, + trainable=self.trainable, + alias_id=alias_id) + + @contextlib.contextmanager + def _assign_dependencies(self): + """Makes assignments depend on the cached value, if any. + + This prevents undefined behavior with reads not ordered wrt writes. + + Yields: + None. + """ + if self._cached_value is not None: + with ops.control_dependencies([self._cached_value]): + yield + else: + yield + + def __array__(self, dtype=None): + """Allows direct conversion to a numpy array. + + >>> np.array(tf.Variable([1.0])) + array([1.], dtype=float32) + + Returns: + The variable value as a numpy array. + """ + # You can't return `self.numpy()` here because for scalars + # that raises: + # ValueError: object __array__ method not producing an array + # Even `self.read_value().__array__()` and `self.read_value()._numpy()` give + # the same error. The `EagerTensor` class must be doing something behind the + # scenes to make `np.array(tf.constant(1))` work. 
+ return np.asarray(self.numpy(), dtype=dtype) + + def __nonzero__(self): + return self.__bool__() + + def __bool__(self): + return bool(self.read_value()) + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + if not context.executing_eagerly(): + raise NotImplementedError( + "__deepcopy__() is only available when eager execution is enabled.") + copied_variable = ResourceVariable( + initial_value=self.read_value(), + trainable=self._trainable, + constraint=self._constraint, + dtype=self._dtype, + name=self._shared_name, + distribute_strategy=self._distribute_strategy, + synchronization=self.synchronization, + aggregation=self.aggregation) + memo[self._unique_id] = copied_variable + return copied_variable + + @property + def dtype(self): + """The dtype of this variable.""" + return self._dtype + + @property + def device(self): + """The device this variable is on.""" + return self.handle.device + + @property + def graph(self): + """The `Graph` of this variable.""" + return self.handle.graph + + @property + def name(self): + """The name of the handle for this variable.""" + return self._handle_name + + @property + def shape(self): + """The shape of this variable.""" + return self._shape + + def set_shape(self, shape): + self._shape = self._shape.merge_with(shape) + + def _shape_as_list(self): + if self.shape.ndims is None: + return None + return [dim.value for dim in self.shape.dims] + + def _shape_tuple(self): + shape = self._shape_as_list() + if shape is None: + return None + return tuple(shape) + + @property + def create(self): + """The op responsible for initializing this variable.""" + if not self._in_graph_mode: + raise RuntimeError("This operation is not supported " + "when eager execution is enabled.") + return self._initializer_op + + @property + def handle(self): + """The handle by which this variable can be accessed.""" + return self._handle + + def value(self): + """A cached operation which reads the value of this variable.""" + if self._cached_value is not None: + return self._cached_value + with ops.colocate_with(None, ignore_existing=True): + return self._read_variable_op() + + def _as_graph_element(self): + """Conversion function for Graph.as_graph_element().""" + return self._graph_element + + @property + def initializer(self): + """The op responsible for initializing this variable.""" + return self._initializer_op + + @property + def initial_value(self): + """Returns the Tensor used as the initial value for the variable.""" + if context.executing_eagerly(): + raise RuntimeError("This property is not supported " + "when eager execution is enabled.") + return self._initial_value + + @property + def constraint(self): + """Returns the constraint function associated with this variable. + + Returns: + The constraint function that was passed to the variable constructor. + Can be `None` if no constraint was passed. 
+ """ + return self._constraint + + @property + def op(self) -> ops.Operation: + """The op for this variable.""" + return self.handle.op + + @property + def trainable(self): + return self._trainable + + @property + def synchronization(self): + return self._synchronization + + @property + def aggregation(self): + return self._aggregation + + def eval(self, session=None): + """Evaluates and returns the value of this variable.""" + if context.executing_eagerly(): + raise RuntimeError("This operation is not supported " + "when eager execution is enabled.") + return self._graph_element.eval(session=session) + + def numpy(self): + if context.executing_eagerly(): + return self.read_value().numpy() + raise NotImplementedError( + "numpy() is only available when eager execution is enabled.") + + @deprecated(None, "Prefer Dataset.range instead.") + def count_up_to(self, limit): + """Increments this variable until it reaches `limit`. + + When that Op is run it tries to increment the variable by `1`. If + incrementing the variable would bring it above `limit` then the Op raises + the exception `OutOfRangeError`. + + If no error is raised, the Op outputs the value of the variable before + the increment. + + This is essentially a shortcut for `count_up_to(self, limit)`. + + Args: + limit: value at which incrementing the variable raises an error. + + Returns: + A `Tensor` that will hold the variable value before the increment. If no + other Op modifies this variable, the values produced will all be + distinct. + """ + return gen_state_ops.resource_count_up_to( + self.handle, limit=limit, T=self.dtype) + + def _copy_trackable_to_cpu(self, object_map): + """For implementing `Trackable`.""" + if self not in object_map: + # If not populated, initialize the cpu copy first. + op_device = pydev.DeviceSpec.from_string(self.device).replace( + device_type="CPU", device_index=0).to_string() + with ops.device(op_device): + # Use `op_device` to prevent cross-device communication for variables + # like `ShardedVariable` + new_var = UninitializedVariable( + trainable=self.trainable, + shape=self.shape, + dtype=self.dtype, + name=self._shared_name) # pylint: disable=protected-access + object_map[self] = new_var + + # Then copy value of self to the copy. + destination_var = object_map[self] + with ops.device(destination_var.device): + # Use `op_device` to prevent cross-device communication for variables + # like `ShardedVariable` + destination_var.assign(self.read_value()) + + def _export_to_saved_model_graph(self, object_map=None, tensor_map=None, + options=None, **kwargs): + """For implementing `Trackable`.""" + new_variable = None + if options.experimental_variable_policy._save_variable_devices(): # pylint:disable=protected-access + with ops.device(self.device): + new_variable = copy_to_graph_uninitialized(self) + else: + new_variable = copy_to_graph_uninitialized(self) + object_map[self] = new_variable + tensor_map[self.handle] = new_variable.handle + return [self.handle] + + def _serialize_to_tensors(self): + """Implements Trackable._serialize_to_tensors.""" + + def _read_variable_closure(): + v = self + with ops.device(v.device): + if context.executing_eagerly() and not v.is_initialized(): + # A SaveSpec tensor value of `None` indicates that the variable is + # uninitialized. + return None + # Read the variable without making a copy to limit memory usage. + x = v.read_value_no_copy() + # To allow variables placed on non-CPU devices to be checkpointed, + # we copy them to CPU on the same machine first. 
+ with ops.device("/device:CPU:0"): + return array_ops.identity(x) + + return { + trackable.VARIABLE_VALUE_KEY: + tensor_callable.Callable( + _read_variable_closure, dtype=self.dtype, device=self.device) + } + + def _restore_from_tensors(self, restored_tensors): + """Implements Trackable._restore_from_tensors.""" + with ops.device(self.device): + restored_tensor = array_ops.identity( + restored_tensors[trackable.VARIABLE_VALUE_KEY]) + try: + assigned_variable = shape_safe_assign_variable_handle( + self.handle, self.shape, restored_tensor) + except ValueError as e: + raise ValueError( + f"Received incompatible tensor with shape {restored_tensor.shape} " + f"when attempting to restore variable with shape {self.shape} " + f"and name {self.name}.") from e + return assigned_variable + + def _read_variable_op(self, no_copy=False): + """Reads the value of the variable. + + If the variable is in copy-on-read mode and `no_copy` is True, the variable + is converted to copy-on-write mode before it is read. + + Args: + no_copy: Whether to prevent a copy of the variable. + + Returns: + The value of the variable. + """ + variable_accessed(self) + + def read_and_set_handle(no_copy): + if no_copy and forward_compat.forward_compatible(2022, 5, 3): + gen_resource_variable_ops.disable_copy_on_read(self.handle) + result = gen_resource_variable_ops.read_variable_op( + self.handle, self._dtype) + _maybe_set_handle_data(self._dtype, self.handle, result) + return result + + if getattr(self, "_caching_device", None) is not None: + with ops.colocate_with(None, ignore_existing=True): + with ops.device(self._caching_device): + result = read_and_set_handle(no_copy) + else: + result = read_and_set_handle(no_copy) + + if not context.executing_eagerly(): + # Note that if a control flow context is active the input of the read op + # might not actually be the handle. This line bypasses it. + record.record_operation( + "ReadVariableOp", [result], [self.handle], + backward_function=lambda x: [x], + forward_function=lambda x: [x]) + return result + + def read_value(self): + """Constructs an op which reads the value of this variable. + + Should be used when there are multiple reads, or when it is desirable to + read the value only after some condition is true. + + Returns: + The value of the variable. + """ + with ops.name_scope("Read"): + value = self._read_variable_op() + # Return an identity so it can get placed on whatever device the context + # specifies instead of the device where the variable is. + return array_ops.identity(value) + + def read_value_no_copy(self): + """Constructs an op which reads the value of this variable without copy. + + The variable is read without making a copy even when it has been sparsely + accessed. Variables in copy-on-read mode will be converted to copy-on-write + mode. + + Returns: + The value of the variable. + """ + with ops.name_scope("Read"): + value = self._read_variable_op(no_copy=True) + # Return an identity so it can get placed on whatever device the context + # specifies instead of the device where the variable is. 
+ return array_ops.identity(value) + + def sparse_read(self, indices, name=None): + """Reads the value of this variable sparsely, using `gather`.""" + with ops.name_scope("Gather" if name is None else name) as name: + variable_accessed(self) + value = gen_resource_variable_ops.resource_gather( + self.handle, indices, dtype=self._dtype, name=name) + + if self._dtype == dtypes.variant: + # For DT_VARIANT types, the handle's shape_and_type[1:] stores the + # variant's handle data. Extract it. + handle_data = get_eager_safe_handle_data(self.handle) + if handle_data.is_set and len(handle_data.shape_and_type) > 1: + value._handle_data = ( # pylint: disable=protected-access + cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData( + is_set=True, shape_and_type=handle_data.shape_and_type[1:])) + return array_ops.identity(value) + + return value + + def gather_nd(self, indices, name=None): + """Reads the value of this variable sparsely, using `gather_nd`.""" + with ops.name_scope("GatherNd" if name is None else name) as name: + if self.trainable: + variable_accessed(self) + value = gen_resource_variable_ops.resource_gather_nd( + self.handle, indices, dtype=self._dtype, name=name) + + return array_ops.identity(value) + + def to_proto(self, export_scope=None): + """Converts a `ResourceVariable` to a `VariableDef` protocol buffer. + + Args: + export_scope: Optional `string`. Name scope to remove. + + Raises: + RuntimeError: If run in EAGER mode. + + Returns: + A `VariableDef` protocol buffer, or `None` if the `Variable` is not + in the specified name scope. + """ + if context.executing_eagerly(): + raise RuntimeError("This operation is not supported " + "when eager execution is enabled.") + if export_scope is None or self.handle.name.startswith(export_scope): + var_def = variable_pb2.VariableDef() + var_def.variable_name = ops.strip_name_scope(self.handle.name, + export_scope) + if self._initial_value is not None: + # This is inside an if-statement for backwards compatibility, since + # self._initial_value might be None for variables constructed from old + # protos. + var_def.initial_value_name = ops.strip_name_scope( + self._initial_value.name, export_scope) + var_def.initializer_name = ops.strip_name_scope(self.initializer.name, + export_scope) + if self._cached_value is not None: + var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, + export_scope) + else: + # Store the graph_element here + var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, + export_scope) + var_def.is_resource = True + var_def.trainable = self.trainable + var_def.synchronization = self.synchronization.value + var_def.aggregation = self.aggregation.value + if self._save_slice_info: + var_def.save_slice_info_def.MergeFrom( + self._save_slice_info.to_proto(export_scope=export_scope)) + return var_def + else: + return None + + @staticmethod + def from_proto(variable_def, import_scope=None): + if context.executing_eagerly(): + raise RuntimeError("This operation is not supported " + "when eager execution is enabled.") + return ResourceVariable( + variable_def=variable_def, import_scope=import_scope) + + __array_priority__ = 100 + + def is_initialized(self, name=None): + """Checks whether a resource variable has been initialized. + + Outputs boolean scalar indicating whether the tensor has been initialized. + + Args: + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. 
+ """ + return gen_resource_variable_ops.var_is_initialized_op(self.handle, name) + + def assign_sub(self, delta, use_locking=None, name=None, read_value=True): + """Subtracts a value from this variable. + + Args: + delta: A `Tensor`. The value to subtract from this variable. + use_locking: If `True`, use locking during the operation. + name: The name to use for the operation. + read_value: A `bool`. Whether to read and return the new value of the + variable or not. + + Returns: + If `read_value` is `True`, this method will return the new value of the + variable after the assignment has completed. Otherwise, when in graph mode + it will return the `Operation` that does the assignment, and when in eager + mode it will return `None`. + """ + # TODO(apassos): this here and below is not atomic. Consider making it + # atomic if there's a way to do so without a performance cost for those who + # don't need it. + with _handle_graph(self.handle), self._assign_dependencies(): + assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op( + self.handle, + ops.convert_to_tensor(delta, dtype=self.dtype), + name=name) + if read_value: + return self._lazy_read(assign_sub_op) + return assign_sub_op + + def assign_add(self, delta, use_locking=None, name=None, read_value=True): + """Adds a value to this variable. + + Args: + delta: A `Tensor`. The value to add to this variable. + use_locking: If `True`, use locking during the operation. + name: The name to use for the operation. + read_value: A `bool`. Whether to read and return the new value of the + variable or not. + + Returns: + If `read_value` is `True`, this method will return the new value of the + variable after the assignment has completed. Otherwise, when in graph mode + it will return the `Operation` that does the assignment, and when in eager + mode it will return `None`. + """ + with _handle_graph(self.handle), self._assign_dependencies(): + assign_add_op = gen_resource_variable_ops.assign_add_variable_op( + self.handle, + ops.convert_to_tensor(delta, dtype=self.dtype), + name=name) + if read_value: + return self._lazy_read(assign_add_op) + return assign_add_op + + def _lazy_read(self, op): + variable_accessed(self) + return _UnreadVariable( + handle=self.handle, + dtype=self.dtype, + shape=self._shape, + in_graph_mode=self._in_graph_mode, + parent_op=op, + unique_id=self._unique_id) + + def assign(self, value, use_locking=None, name=None, read_value=True): + """Assigns a new value to this variable. + + Args: + value: A `Tensor`. The new value for this variable. + use_locking: If `True`, use locking during the assignment. + name: The name to use for the assignment. + read_value: A `bool`. Whether to read and return the new value of the + variable or not. + + Returns: + If `read_value` is `True`, this method will return the new value of the + variable after the assignment has completed. Otherwise, when in graph mode + it will return the `Operation` that does the assignment, and when in eager + mode it will return `None`. + """ + # Note: not depending on the cached value here since this can be used to + # initialize the variable. + with _handle_graph(self.handle): + value_tensor = ops.convert_to_tensor(value, dtype=self.dtype) + if not self._shape.is_compatible_with(value_tensor.shape): + if self.name is None: + tensor_name = "" + else: + tensor_name = " " + str(self.name) + raise ValueError( + (f"Cannot assign value to variable '{tensor_name}': Shape mismatch." 
+ f"The variable shape {self._shape}, and the " + f"assigned value shape {value_tensor.shape} are incompatible.")) + kwargs = {} + if forward_compat.forward_compatible(2022, 3, 23): + # If the shape is fully defined, we do a runtime check with the shape of + # value. + validate_shape = self._validate_shape and self._shape.is_fully_defined() + kwargs["validate_shape"] = validate_shape + assign_op = gen_resource_variable_ops.assign_variable_op( + self.handle, value_tensor, name=name, **kwargs) + if read_value: + return self._lazy_read(assign_op) + return assign_op + + def __reduce__(self): + # The implementation mirrors that of __deepcopy__. + return functools.partial( + ResourceVariable, + initial_value=self.numpy(), + trainable=self.trainable, + name=self._shared_name, + dtype=self.dtype, + constraint=self.constraint, + distribute_strategy=self._distribute_strategy), () + + def scatter_sub(self, sparse_delta, use_locking=False, name=None): + """Subtracts `tf.IndexedSlices` from this variable. + + Args: + sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_sub( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_add(self, sparse_delta, use_locking=False, name=None): + """Adds `tf.IndexedSlices` to this variable. + + Args: + sparse_delta: `tf.IndexedSlices` to be added to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_add( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_max(self, sparse_delta, use_locking=False, name=None): + """Updates this variable with the max of `tf.IndexedSlices` and itself. + + Args: + sparse_delta: `tf.IndexedSlices` to use as an argument of max with this + variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_max( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_min(self, sparse_delta, use_locking=False, name=None): + """Updates this variable with the min of `tf.IndexedSlices` and itself. + + Args: + sparse_delta: `tf.IndexedSlices` to use as an argument of min with this + variable. + use_locking: If `True`, use locking during the operation. 
+ name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_min( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_mul(self, sparse_delta, use_locking=False, name=None): + """Multiply this variable by `tf.IndexedSlices`. + + Args: + sparse_delta: `tf.IndexedSlices` to multiply this variable by. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_mul( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_div(self, sparse_delta, use_locking=False, name=None): + """Divide this variable by `tf.IndexedSlices`. + + Args: + sparse_delta: `tf.IndexedSlices` to divide this variable by. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_div( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def scatter_update(self, sparse_delta, use_locking=False, name=None): + """Assigns `tf.IndexedSlices` to this variable. + + Args: + sparse_delta: `tf.IndexedSlices` to be assigned to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + gen_resource_variable_ops.resource_scatter_update( + self.handle, + sparse_delta.indices, + ops.convert_to_tensor(sparse_delta.values, self.dtype), + name=name)) + + def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): + """Assigns `tf.IndexedSlices` to this variable batch-wise. + + Analogous to `batch_gather`. This assumes that this variable and the + sparse_delta IndexedSlices have a series of leading dimensions that are the + same for all of them, and the updates are performed on the last dimension of + indices. 
In other words, the dimensions should be the following: + + `num_prefix_dims = sparse_delta.indices.ndims - 1` + `batch_dim = num_prefix_dims + 1` + `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[ + batch_dim:]` + + where + + `sparse_delta.updates.shape[:num_prefix_dims]` + `== sparse_delta.indices.shape[:num_prefix_dims]` + `== var.shape[:num_prefix_dims]` + + And the operation performed can be expressed as: + + `var[i_1, ..., i_n, + sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[ + i_1, ..., i_n, j]` + + When sparse_delta.indices is a 1D tensor, this operation is equivalent to + `scatter_update`. + + To avoid this operation one can looping over the first `ndims` of the + variable and using `scatter_update` on the subtensors that result of slicing + the first dimension. This is a valid option for `ndims = 1`, but less + efficient than this implementation. + + Args: + sparse_delta: `tf.IndexedSlices` to be assigned to this variable. + use_locking: If `True`, use locking during the operation. + name: the name of the operation. + + Returns: + The updated variable. + + Raises: + TypeError: if `sparse_delta` is not an `IndexedSlices`. + """ + if not isinstance(sparse_delta, indexed_slices.IndexedSlices): + raise TypeError(f"Argument `sparse_delta` must be a " + f"`tf.IndexedSlices`. Received arg: {sparse_delta}") + return self._lazy_read( + state_ops.batch_scatter_update( + self, + sparse_delta.indices, + sparse_delta.values, + use_locking=use_locking, + name=name)) + + def scatter_nd_sub(self, indices, updates, name=None): + """Applies sparse subtraction to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + op = ref.scatter_nd_sub(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(op) + ``` + + The resulting update to ref would look like this: + + [1, -9, 3, -6, -6, 6, 7, -4] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + The updated variable. + """ + return self._lazy_read( + gen_state_ops.resource_scatter_nd_sub( + self.handle, + indices, + ops.convert_to_tensor(updates, self.dtype), + name=name)) + + def scatter_nd_add(self, indices, updates, name=None): + """Applies sparse addition to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 
+ + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + add = ref.scatter_nd_add(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(add) + ``` + + The resulting update to ref would look like this: + + [1, 13, 3, 14, 14, 6, 7, 20] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + The updated variable. + """ + return self._lazy_read( + gen_state_ops.resource_scatter_nd_add( + self.handle, + indices, + ops.convert_to_tensor(updates, self.dtype), + name=name)) + + def scatter_nd_update(self, indices, updates, name=None): + """Applies sparse assignment to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + op = ref.scatter_nd_update(indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(op) + ``` + + The resulting update to ref would look like this: + + [1, 11, 3, 10, 9, 6, 7, 12] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + The updated variable. + """ + return self._lazy_read( + gen_state_ops.resource_scatter_nd_update( + self.handle, + indices, + ops.convert_to_tensor(updates, self.dtype), + name=name)) + + def scatter_nd_max(self, indices, updates, name=None): + """Updates this variable with the max of `tf.IndexedSlices` and itself. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + See `tf.scatter_nd` for more details about how to make updates to + slices. 
+ + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + The updated variable. + """ + return self._lazy_read( + gen_state_ops.resource_scatter_nd_max( + self.handle, + indices, + ops.convert_to_tensor(updates, self.dtype), + name=name)) + + def scatter_nd_min(self, indices, updates, name=None): + """Updates this variable with the min of `tf.IndexedSlices` and itself. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + indices: The indices to be used in the operation. + updates: The values to be used in the operation. + name: the name of the operation. + + Returns: + The updated variable. + """ + return self._lazy_read( + gen_state_ops.resource_scatter_nd_min( + self.handle, + indices, + ops.convert_to_tensor(updates, self.dtype), + name=name)) + + def _write_object_proto(self, proto, options): + """Writes additional information of the variable into the SavedObject proto. + + Subclasses of ResourceVariables could choose to override this method to + customize extra information to provide when saving a SavedModel. + + Ideally, this should contain the logic in + write_object_proto_for_resource_variable but `DistributedValue` is an + outlier at the momemnt. Once `DistributedValue` becomes a proper + ResourceVariable, we should remove the helper method below. + + Args: + proto: `SavedObject` proto to update. + options: A `SaveOption` instance that configures save behavior. + """ + write_object_proto_for_resource_variable(self, proto, options) + + def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask, + end_mask, ellipsis_mask, new_axis_mask, + shrink_axis_mask): + with _handle_graph(self.handle), self._assign_dependencies(): + return self._lazy_read( + gen_array_ops.resource_strided_slice_assign( + ref=self.handle, + begin=begin, + end=end, + strides=strides, + value=ops.convert_to_tensor(value, dtype=self.dtype), + name=name, + begin_mask=begin_mask, + end_mask=end_mask, + ellipsis_mask=ellipsis_mask, + new_axis_mask=new_axis_mask, + shrink_axis_mask=shrink_axis_mask)) + + def __complex__(self): + return complex(self.value().numpy()) + + def __int__(self): + return int(self.value().numpy()) + + def __long__(self): + return long(self.value().numpy()) + + def __float__(self): + return float(self.value().numpy()) + + def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): + del name + if dtype is not None and not dtype.is_compatible_with(self.dtype): + raise ValueError( + f"Incompatible type conversion requested to type {dtype.name} for " + f"`tf.Variable of type {self.dtype.name}. (Variable: {self})") + if as_ref: + return self.read_value().op.inputs[0] + else: + return self.value() + + def __iadd__(self, unused_other): + raise RuntimeError("`variable += value` with `tf.Variable`s is not " + "supported. 
Use `variable.assign_add(value)` to modify " + "the variable, or `out = variable + value` if you " + "need to get a new output Tensor.") + + def __isub__(self, unused_other): + raise RuntimeError("`variable -= value` with `tf.Variable`s is not " + "supported. Use `variable.assign_sub(value)` to modify " + "the variable, or `out = variable - value` if you " + "need to get a new output Tensor.") + + def __imul__(self, unused_other): + raise RuntimeError("`var *= value` with `tf.Variable`s is not " + "supported. Use `var.assign(var * value)` to modify " + "the variable, or `out = var * value` if you " + "need to get a new output Tensor.") + + def __idiv__(self, unused_other): + raise RuntimeError("`var /= value` with `tf.Variable`s is not " + "supported. Use `var.assign(var / value)` to modify " + "the variable, or `out = var / value` if you " + "need to get a new output Tensor.") + + def __itruediv__(self, unused_other): + raise RuntimeError("`var /= value` with `tf.Variable`s is not " + "supported. Use `var.assign(var / value)` to modify " + "the variable, or `out = var / value` if you " + "need to get a new output Tensor.") + + def __irealdiv__(self, unused_other): + raise RuntimeError("`var /= value` with `tf.Variable`s is not " + "supported. Use `var.assign(var / value)` to modify " + "the variable, or `out = var / value` if you " + "need to get a new output Tensor.") + + def __ipow__(self, unused_other): + raise RuntimeError("`var **= value` with `tf.Variable`s is not " + "supported. Use `var.assign(var ** value)` to modify " + "the variable, or `out = var ** value` if you " + "need to get a new output Tensor.") + + +class ResourceVariableGradient( + composite_tensor_gradient.CompositeTensorGradient): + """CompositeTensorGradient protocol for ResourceVariable.""" + + # TODO(b/246997907): update this method to return value.handle. + def get_gradient_components(self, value): + """Returns the components of `value` that should be included in gradients. + + For a ResourceVariable, its gradient component is its handle tensor. + For now, we return the ResourceVariable because the gradient infrastructure + has special logic to handle ResourceVariables. We should remove that + special logic and return the handle tensor. + + Args: + value: A `ResourceVariable`. + + Returns: + `value` itself. + """ + return value + + def replace_gradient_components(self, value, component_grads): + """Replaces the gradient components in `value` with `component_grads`. + + The gradient of a ResourceVariable is either None or a Tensor. So we don't + need `value`'s TypeSpec or non-gradient components in this method. + + Args: + value: A `ResourceVariable` with its gradient components compatible with + `component_grads`. + component_grads: A `Tensor` or None as the gradient result. + + Returns: + The `component_grads`, which is either a `Tensor` or None. + """ + return component_grads + + +class ResourceVariable(BaseResourceVariable, composite_tensor.CompositeTensor): + """Variable based on resource handles. + + See the [Variables How To](https://tensorflow.org/guide/variables) + for a high level overview. + + A `ResourceVariable` allows you to maintain state across subsequent calls to + session.run. + + The `ResourceVariable` constructor requires an initial value for the variable, + which can be a `Tensor` of any type and shape. The initial value defines the + type and shape of the variable. After construction, the type and shape of + the variable are fixed.
The value can be changed using one of the assign + methods. + + Just like any `Tensor`, variables created with + `tf.Variable(use_resource=True)` can be used as inputs for other Ops in the + graph. Additionally, all the operators overloaded for the `Tensor` class are + carried over to variables, so you can also add nodes to the graph by just + doing arithmetic on variables. + + Unlike ref-based variable, a ResourceVariable has well-defined semantics. Each + usage of a ResourceVariable in a TensorFlow graph adds a read_value operation + to the graph. The Tensors returned by a read_value operation are guaranteed to + see all modifications to the value of the variable which happen in any + operation on which the read_value depends on (either directly, indirectly, or + via a control dependency) and guaranteed to not see any modification to the + value of the variable from operations that depend on the read_value operation. + Updates from operations that have no dependency relationship to the read_value + operation might or might not be visible to read_value. + + For example, if there is more than one assignment to a ResourceVariable in + a single session.run call there is a well-defined value for each operation + which uses the variable's value if the assignments and the read are connected + by edges in the graph. Consider the following example, in which two writes + can cause tf.Variable and tf.ResourceVariable to behave differently: + + ```python + a = tf.Variable(1.0, use_resource=True) + a.initializer.run() + + assign = a.assign(2.0) + with tf.control_dependencies([assign]): + b = a.read_value() + with tf.control_dependencies([b]): + other_assign = a.assign(3.0) + with tf.control_dependencies([other_assign]): + # Will print 2.0 because the value was read before other_assign ran. If + # `a` was a tf.Variable instead, 2.0 or 3.0 could be printed. + tf.compat.v1.Print(b, [b]).eval() + ``` + """ + + def __init__( + self, # pylint: disable=super-init-not-called + initial_value=None, + trainable=None, + collections=None, + validate_shape=True, # pylint: disable=unused-argument + caching_device=None, + name=None, + dtype=None, + variable_def=None, + import_scope=None, + constraint=None, + distribute_strategy=None, + synchronization=None, + aggregation=None, + shape=None, + handle=None, + experimental_enable_variable_lifting=None, + ): + """Creates a variable. + + Args: + initial_value: A `Tensor`, or Python object convertible to a `Tensor`, + which is the initial value for the Variable. Can also be a callable with + no argument that returns the initial value when called. (Note that + initializer functions from init_ops.py must first be bound to a shape + before being used here.) + trainable: If `True`, the default, also adds the variable to the graph + collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as + the default list of variables to use by the `Optimizer` classes. + Defaults to `True`, unless `synchronization` is set to `ON_READ`, in + which case it defaults to `False`. + collections: List of graph collections keys. The new variable is added to + these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + validate_shape: If `False`, allows the variable to be initialized with a + value of unknown shape. If `True`, the default, the shape of + `initial_value` must be known. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. 
Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + dtype: If set, initial_value will be converted to the given type. If None, + either the datatype will be kept (if initial_value is a Tensor) or + float32 will be used (if it is a Python object convertible to a Tensor). + variable_def: `VariableDef` protocol buffer. If not None, recreates the + `ResourceVariable` object with its contents. `variable_def` and other + arguments (except for import_scope) are mutually exclusive. + import_scope: Optional `string`. Name scope to add to the + ResourceVariable. Only used when `variable_def` is provided. + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + distribute_strategy: The tf.distribute.Strategy this variable is being + created inside of. + synchronization: Indicates when a distributed a variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + shape: (optional) The shape of this variable. If None, the shape of + `initial_value` will be used. When setting this argument to + `tf.TensorShape(None)` (representing an unspecified shape), the variable + can be assigned with values of different shapes. + handle: (optional) The handle of a `tf.Variable`. If provided, only + `trainable`, `shape`, `dtype`, and `handle` will be used to construct + this `tf.Variable`. + experimental_enable_variable_lifting: Whether to lift the variable out if + it's in a `tf.function`. Default is `True`. When this argument + is `True`, variable creation will follow the behavior and + restrictions described + [here](https://www.tensorflow.org/guide/function#creating_tfvariables). + If this argument is `False`, that description doesn't apply, + and you can freely create and use the variable in the + `tf.function`, as if it's a "mutable `tf.Tensor`". You can't + return the variable though. + + Raises: + ValueError: If the initial value is not specified, or does not have a + shape and `validate_shape` is `True`. + + @compatibility(eager) + When Eager Execution is enabled, the default for the `collections` argument + is `None`, which signifies that this `Variable` will not be added to any + collections. + @end_compatibility + """ + if variable_def: + if initial_value is not None: + raise ValueError(f"The variable_def and initial_value args to " + f"`tf.Variable` are mutually exclusive, but got both: " + f"variable_def={variable_def},\n" + f"initial_value={initial_value}") + if context.executing_eagerly(): + raise ValueError(f"Creating a `tf.Variable` with a `variable_def` arg " + f"is not supported when eager execution is enabled. 
" + f"Got: variable_def={variable_def}") + self._init_from_proto( + variable_def, + import_scope=import_scope, + validate_shape=validate_shape) + elif handle is not None: + self._init_from_handle(trainable=trainable, + shape=shape, + dtype=dtype, + handle=handle) + else: + self._init_from_args( + initial_value=initial_value, + trainable=trainable, + collections=collections, + caching_device=caching_device, + name=name, + dtype=dtype, + constraint=constraint, + synchronization=synchronization, + aggregation=aggregation, + shape=shape, + distribute_strategy=distribute_strategy, + validate_shape=validate_shape, + experimental_enable_variable_lifting=experimental_enable_variable_lifting, + ) + + # CompositeTensor method + @property + def _type_spec(self): + return VariableSpec.from_value(self) + + # CompositeTensor method + def _shape_invariant_to_type_spec(self, shape): + return VariableSpec(shape, self.dtype, self.trainable) + + # CompositeTensorGradient protocol + __composite_gradient__ = ResourceVariableGradient() + + def _init_from_args( + self, + initial_value=None, + trainable=None, + collections=None, + caching_device=None, + name=None, + dtype=None, + constraint=None, + synchronization=None, + aggregation=None, + distribute_strategy=None, + shape=None, + validate_shape=True, + experimental_enable_variable_lifting=None, + ): + """Creates a variable. + + Args: + initial_value: A `Tensor`, or Python object convertible to a `Tensor`, + which is the initial value for the Variable. The initial value must have + a shape specified unless `validate_shape` is set to False. Can also be a + callable with no argument that returns the initial value when called. + (Note that initializer functions from init_ops.py must first be bound to + a shape before being used here.) + trainable: If `True`, the default, also adds the variable to the graph + collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as + the default list of variables to use by the `Optimizer` classes. + Defaults to `True`, unless `synchronization` is set to `ON_READ`, in + which case it defaults to `False`. + collections: List of graph collections keys. The new variable is added to + these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + dtype: If set, initial_value will be converted to the given type. If None, + either the datatype will be kept (if initial_value is a Tensor) or + float32 will be used (if it is a Python object convertible to a Tensor). + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + synchronization: Indicates when a distributed a variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. 
By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + distribute_strategy: DistributionStrategy under which this variable was + created. + shape: (optional) The shape of this variable. If None, the shape of + `initial_value` will be used. When setting this argument to + `tf.TensorShape(None)` (representing an unspecified shape), the variable + can be assigned with values of different shapes. + validate_shape: If `False`, allows the variable to be initialized with a + value of unknown shape. If `True`, the default, the shape of + `initial_value` must be known. + experimental_enable_variable_lifting: Whether to lift the variable out if + it's in a `tf.function`. Default is `True`. When this argument + is `True`, variable creation will follow the behavior and + restrictions described + [here](https://www.tensorflow.org/guide/function#creating_tfvariables). + If this argument is `False`, that description doesn't apply, + and you can freely create and use the variable in the + `tf.function`, as if it's a "mutable `tf.Tensor`". You can't + return the variable though. + + Raises: + ValueError: If the initial value is not specified, or does not have a + shape and `validate_shape` is `True`. + + @compatibility(eager) + When Eager Execution is enabled, variables are never added to collections. + It is not implicitly added to the `GLOBAL_VARIABLES` or + `TRAINABLE_VARIABLES` collections, and the `collections` argument is + ignored. + @end_compatibility + """ + synchronization, aggregation, trainable = ( + variables.validate_synchronization_aggregation_trainable( + synchronization, aggregation, trainable, name)) + if experimental_enable_variable_lifting is None: + experimental_enable_variable_lifting = True + if initial_value is None: + raise ValueError("The `initial_value` arg to `tf.Variable` must " + "be specified except when you are not providing a " + "`variable_def`. You provided neither.") + init_from_fn = callable(initial_value) + + if isinstance(initial_value, tensor_module.Tensor) and hasattr( + initial_value, "graph") and initial_value.graph.building_function: + raise ValueError(f"Argument `initial_value` ({initial_value}) could not " + "be lifted out of a `tf.function`. " + f"(Tried to create variable with name='{name}'). " + "To avoid this error, when constructing `tf.Variable`s " + "inside of `tf.function` you can create the " + "`initial_value` tensor in a " + "`tf.init_scope` or pass a callable `initial_value` " + "(e.g., `tf.Variable(lambda : " + "tf.truncated_normal([10, 40]))`). " + "Please file a feature request if this " + "restriction inconveniences you.") + + if collections is None: + collections = [ops.GraphKeys.GLOBAL_VARIABLES] + if not isinstance(collections, (list, tuple, set)): + raise ValueError( + f"collections argument to Variable constructor must be a list, " + f"tuple, or set. Got {collections} of type {type(collections)}") + if constraint is not None and not callable(constraint): + raise ValueError(f"Argument `constraint` must be None or a callable. " + f"a callable. 
Got a {type(constraint)}: {constraint}") + + if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: + collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] + with ops.init_scope(): + self._in_graph_mode = not context.executing_eagerly() + if experimental_enable_variable_lifting: + maybe_init_scope = ops.init_scope + else: + maybe_init_scope = contextlib.nullcontext + with maybe_init_scope(): + with ops.name_scope( + name, + "Variable", [] if init_from_fn else [initial_value], + skip_on_eager=False) as name: + # pylint: disable=protected-access + handle_name = ops.name_from_scope_name(name) + if self._in_graph_mode: + shared_name = handle_name + unique_id = shared_name + else: + # When in eager mode, use a uid for the shared_name, to prevent + # accidental sharing. + unique_id = "%s_%d" % (handle_name, ops.uid()) + shared_name = None # Never shared + # Use attr_scope and device(None) to simulate the behavior of + # colocate_with when the variable we want to colocate with doesn't + # yet exist. + device_context_manager = ( + ops.device if self._in_graph_mode else ops.NullContextmanager) + attr = attr_value_pb2.AttrValue( + list=attr_value_pb2.AttrValue.ListValue( + s=[compat.as_bytes("loc:@%s" % handle_name)])) + with ops.get_default_graph()._attr_scope({"_class": attr}): + with ops.name_scope("Initializer"), device_context_manager(None): + if init_from_fn: + initial_value = initial_value() + if isinstance(initial_value, trackable.CheckpointInitialValue): + self._maybe_initialize_trackable() + self._update_uid = initial_value.checkpoint_position.restore_uid + initial_value = initial_value.wrapped_value + initial_value = ops.convert_to_tensor( + initial_value, name="initial_value", dtype=dtype) + if shape is not None: + if not initial_value.shape.is_compatible_with(shape): + raise ValueError( + f"In this `tf.Variable` creation, the initial value's shape " + f"({initial_value.shape}) is not compatible with " + f"the explicitly supplied `shape` argument ({shape}).") + else: + shape = initial_value.shape + handle = eager_safe_variable_handle( + initial_value=initial_value, + shape=shape, + shared_name=shared_name, + name=name, + graph_mode=self._in_graph_mode) + handle._parent_trackable = weakref.ref(self) + handle._name = handle_name + ":0" + handle._unique_id = unique_id + # pylint: disable=protected-access + if (self._in_graph_mode and initial_value is not None and + initial_value.op._get_control_flow_context() is not None): + raise ValueError( + f"The `initial_value` passed to `tf.Variable` {name} is from " + f"inside a control-flow construct, such as a loop or " + f"conditional. When creating a " + f"`tf.Variable` inside a loop or conditional, use a lambda as " + f"the `initial_value`. 
Got: initial_value=({initial_value})") + # pylint: enable=protected-access + dtype = initial_value.dtype.base_dtype + + if self._in_graph_mode: + with ops.name_scope("IsInitialized"): + is_initialized_op = ( + gen_resource_variable_ops.var_is_initialized_op(handle)) + if initial_value is not None: + # pylint: disable=g-backslash-continuation + with ops.name_scope("Assign") as n, \ + ops.colocate_with(None, ignore_existing=True), \ + ops.device(handle.device): + # pylint: disable=protected-access + initializer_op = ( + gen_resource_variable_ops.assign_variable_op( + handle, + variables._try_guard_against_uninitialized_dependencies( + name, initial_value), + name=n)) + # pylint: enable=protected-access + # pylint: enable=g-backslash-continuation + with ops.name_scope("Read"): + # Manually assign reads to the handle's device to avoid log + # messages. + with ops.device(handle.device): + value = gen_resource_variable_ops.read_variable_op(handle, dtype) + _maybe_set_handle_data(dtype, handle, value) + graph_element = value + if caching_device is not None: + # Variables may be created in a tf.device() or ops.colocate_with() + # context. At the same time, users would expect caching device to + # be independent of this context, and/or would not expect the + # current device context to be merged with the caching device + # spec. Therefore we reset the colocation stack before creating + # the cached value. Note that resetting the colocation stack will + # also reset the device stack. + with ops.colocate_with(None, ignore_existing=True): + with ops.device(caching_device): + cached_value = array_ops.identity(value) + else: + cached_value = None + else: + gen_resource_variable_ops.assign_variable_op(handle, initial_value) + is_initialized_op = None + initializer_op = None + graph_element = None + if caching_device: + with ops.device(caching_device): + cached_value = gen_resource_variable_ops.read_variable_op( + handle, dtype) + _maybe_set_handle_data(dtype, handle, cached_value) + else: + cached_value = None + + if cached_value is not None: + # Store the variable object so that the original variable can be + # accessed to generate functions that are compatible with SavedModel. + cached_value._cached_variable = weakref.ref(self) # pylint: disable=protected-access + + if self._in_graph_mode: + # Eager variables are only added to collections if they are part of an + # eager variable store (otherwise in an interactive session they would + # hog memory and cause OOM). This is done in ops/variable_scope.py. + ops.add_to_collections(collections, self) + elif ops.GraphKeys.GLOBAL_STEP in collections: + ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self) + initial_value = initial_value if self._in_graph_mode else None + super(ResourceVariable, self).__init__( + trainable=trainable, + shape=shape, + dtype=dtype, + handle=handle, + synchronization=synchronization, + constraint=constraint, + aggregation=aggregation, + distribute_strategy=distribute_strategy, + name=name, + unique_id=unique_id, + handle_name=handle_name, + graph_element=graph_element, + initial_value=initial_value, + initializer_op=initializer_op, + is_initialized_op=is_initialized_op, + cached_value=cached_value, + caching_device=caching_device, + validate_shape=validate_shape, + ) + + def _init_from_proto(self, + variable_def, + import_scope=None, + validate_shape=True): + """Initializes from `VariableDef` proto.""" + # Note that init_from_proto is currently not supported in Eager mode. 
+ assert not context.executing_eagerly() + self._in_graph_mode = True + assert isinstance(variable_def, variable_pb2.VariableDef) + if not variable_def.is_resource: + raise ValueError(f"The `variable_def` you passed to `tf.Variable` is " + f"not a resource variable. Trying to restore a TF 1.x " + f"Reference Variable as a TF 2.x ResourceVariable is " + f"unsupported. Got variable_def={variable_def}") + + # Create from variable_def. + g = ops.get_default_graph() + self._handle = g.as_graph_element( + ops.prepend_name_scope( + variable_def.variable_name, import_scope=import_scope), + allow_operation=False) + self._shape = tensor_shape.TensorShape(self._handle.op.get_attr("shape")) + self._handle_name = self._handle.name + self._unique_id = self._handle_name + self._initializer_op = g.as_graph_element( + ops.prepend_name_scope( + variable_def.initializer_name, import_scope=import_scope)) + # Check whether initial_value_name exists for backwards compatibility. + if (hasattr(variable_def, "initial_value_name") and + variable_def.initial_value_name): + self._initial_value = g.as_graph_element( + ops.prepend_name_scope( + variable_def.initial_value_name, import_scope=import_scope)) + else: + self._initial_value = None + synchronization, aggregation, trainable = ( + variables.validate_synchronization_aggregation_trainable( + variable_def.synchronization, variable_def.aggregation, + variable_def.trainable, variable_def.variable_name)) + self._synchronization = synchronization + self._aggregation = aggregation + self._trainable = trainable + if variable_def.snapshot_name: + snapshot = g.as_graph_element( + ops.prepend_name_scope( + variable_def.snapshot_name, import_scope=import_scope)) + if snapshot.op.type != "ReadVariableOp": + self._cached_value = snapshot + else: + self._cached_value = None + while snapshot.op.type != "ReadVariableOp": + snapshot = snapshot.op.inputs[0] + self._graph_element = snapshot + else: + self._cached_value = None + # Legacy case for protos without the snapshot name; assume it's the + # following. + self._graph_element = g.get_tensor_by_name(self._handle.op.name + + "/Read/ReadVariableOp:0") + if variable_def.HasField("save_slice_info_def"): + self._save_slice_info = variables.Variable.SaveSliceInfo( + save_slice_info_def=variable_def.save_slice_info_def, + import_scope=import_scope) + else: + self._save_slice_info = None + self._caching_device = None + self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype")) + self._constraint = None + self._validate_shape = validate_shape + + def _init_from_handle(self, + trainable=None, + shape=None, + dtype=None, + handle=None): + handle_data = get_eager_safe_handle_data(handle) + if not handle_data.is_set: + # The handle may not have the handle shape and dtype if it was created + # using tf.placeholder.
+ handle_data = handle_data_util.create_handle_data(shape, dtype) + handle_data_util.set_handle_data(handle, handle_data) + # pylint: disable=protected-access + if hasattr(handle, "_name") and isinstance(handle._name, str): + handle_name = handle._name.rstrip(":0") + else: + handle_name = None + # pylint: enable=protected-access + unique_id = getattr(handle, "_unique_id", None) + super().__init__( + trainable=trainable, shape=shape, dtype=dtype, handle=handle, + unique_id=unique_id, handle_name=handle_name) + + +class UninitializedVariable(BaseResourceVariable): + """A variable with no initializer.""" + + def __init__( # pylint: disable=super-init-not-called + self, + trainable=None, + caching_device=None, + name=None, + shape=None, + dtype=None, + constraint=None, + synchronization=None, + aggregation=None, + extra_handle_data=None, + distribute_strategy=None, + **unused_kwargs): + """Creates the variable handle. + + Args: + trainable: If `True`, GradientTapes automatically watch uses of this + Variable. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + shape: The variable's shape. + dtype: The variable's dtype. + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + synchronization: Indicates when a distributed a variable will be + aggregated. Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + extra_handle_data: Optional, another resource handle or Tensor with handle + data to merge with `shape` and `dtype`. + distribute_strategy: The tf.distribute.Strategy this variable is being + created inside of. + """ + with ops.init_scope(): + # Here we are detecting eagerness within an init_scope, so this will only + # be true when we are running in TF1 graph mode. + self._in_graph_mode = not context.executing_eagerly() + with ops.name_scope(name, "Variable", skip_on_eager=False) as name: + handle_name = ops.name_from_scope_name(name) + if self._in_graph_mode: + shared_name = handle_name + unique_id = shared_name + else: + unique_id = "%s_%d" % (handle_name, ops.uid()) + shared_name = None # Never shared + handle = _variable_handle_from_shape_and_dtype( + shape=shape, + dtype=dtype, + shared_name=shared_name, + name=name, + graph_mode=self._in_graph_mode, + initial_value=extra_handle_data) + handle._parent_trackable = weakref.ref(self) + handle._name = handle_name + ":0" + handle._unique_id = unique_id + + if self._in_graph_mode: + # We only need to add the read_variable_op in TF1. 
+ with ops.name_scope("Read"): + # Manually assign reads to the handle's device to avoid log + # messages. + with ops.device(handle.device): + value = gen_resource_variable_ops.read_variable_op(handle, dtype) + _maybe_set_handle_data(dtype, handle, value) + graph_element = value + ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self) + # Do *not* add to TRAINABLE_VARIABLES here, even if self._trainable, + # because retraining or frozen use of imported SavedModels is + # controlled at higher levels of model building. + else: + graph_element = None + super(UninitializedVariable, self).__init__( + distribute_strategy=distribute_strategy, + shape=shape, + dtype=dtype, + unique_id=unique_id, + handle_name=handle_name, + constraint=constraint, + handle=handle, + graph_element=graph_element, + trainable=trainable, + synchronization=synchronization, + aggregation=aggregation, + in_graph_mode=self._in_graph_mode, **unused_kwargs) + + +def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False): + return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access + + +# Register a conversion function which reads the value of the variable, +# allowing instances of the class to be used as tensors. +tensor_conversion_registry.register_tensor_conversion_function( + BaseResourceVariable, _dense_var_to_tensor) + + +class _UnreadVariable(BaseResourceVariable): + """Represents a future for a read of a variable. + + Pretends to be the tensor if anyone looks. + """ + + def __init__(self, handle, dtype, shape, in_graph_mode, parent_op, unique_id): + if isinstance(handle, ops.EagerTensor): + handle_name = "" + else: + handle_name = handle.name + # Only create a graph_element if we're in session.run-land as only + # session.run requires a preexisting tensor to evaluate. Otherwise we can + # avoid accidentally reading the variable. 
+ if context.executing_eagerly() or ops.inside_function(): + graph_element = None + else: + with ops.control_dependencies([parent_op]): + graph_element = gen_resource_variable_ops.read_variable_op( + handle, dtype) + _maybe_set_handle_data(dtype, handle, graph_element) + super(_UnreadVariable, self).__init__( + handle=handle, + shape=shape, + handle_name=handle_name, + unique_id=unique_id, + dtype=dtype, + graph_element=graph_element) + self._parent_op = parent_op + + @property + def name(self): + if self._in_graph_mode: + return self._parent_op.name + else: + return "UnreadVariable" + + def value(self): + return self._read_variable_op() + + def read_value(self): + return self._read_variable_op() + + def _read_variable_op(self): + with ops.control_dependencies([self._parent_op]): + result = gen_resource_variable_ops.read_variable_op( + self._handle, self._dtype) + _maybe_set_handle_data(self._dtype, self._handle, result) + return result + + def assign_sub(self, delta, use_locking=None, name=None, read_value=True): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).assign_sub(delta, use_locking, name, + read_value) + + def assign_add(self, delta, use_locking=None, name=None, read_value=True): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).assign_add(delta, use_locking, name, + read_value) + + def assign(self, value, use_locking=None, name=None, read_value=True): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).assign(value, use_locking, name, + read_value) + + def scatter_sub(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_sub(sparse_delta, use_locking, + name) + + def scatter_add(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_add(sparse_delta, use_locking, + name) + + def scatter_max(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_max(sparse_delta, use_locking, + name) + + def scatter_min(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_min(sparse_delta, use_locking, + name) + + def scatter_mul(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_mul(sparse_delta, use_locking, + name) + + def scatter_div(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_div(sparse_delta, use_locking, + name) + + def scatter_update(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, + self).scatter_update(sparse_delta, use_locking, name) + + def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, + self).batch_scatter_update(sparse_delta, use_locking, name) + + def scatter_nd_sub(self, indices, updates, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_nd_sub(indices, updates, name) + + def scatter_nd_add(self, indices, updates, name=None): + with 
ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_nd_add(indices, updates, name) + + def scatter_nd_update(self, indices, updates, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, + self).scatter_nd_update(indices, updates, name) + + def scatter_nd_max(self, indices, updates, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_nd_max(indices, updates, name) + + def scatter_nd_min(self, indices, updates, name=None): + with ops.control_dependencies([self._parent_op]): + return super(_UnreadVariable, self).scatter_nd_min(indices, updates, name) + + @property + def op(self) -> ops.Operation: + """The op for this variable.""" + return self._parent_op + + +@ops.RegisterGradient("ReadVariableOp") +def _ReadGrad(_, grad): + """Gradient for read op.""" + return grad + + +def variable_shape(handle, out_type=dtypes.int32): + handle_data = get_eager_safe_handle_data(handle) + if handle_data is None or not handle_data.is_set: + return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) + shape_proto = handle_data.shape_and_type[0].shape + if shape_proto.unknown_rank or any(x.size == -1 for x in shape_proto.dim): + return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) + return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type) + + +@ops.RegisterGradient("ResourceGather") +def _GatherGrad(op, grad): + """Gradient for gather op.""" + # Build appropriately shaped IndexedSlices + handle = op.inputs[0] + indices = op.inputs[1] + params_shape = variable_shape(handle) + size = array_ops.expand_dims(array_ops.size(indices), 0) + values_shape = array_ops.concat([size, params_shape[1:]], 0) + values = array_ops.reshape(grad, values_shape) + indices = array_ops.reshape(indices, size) + return (indexed_slices.IndexedSlices(values, indices, params_shape), None) + + +@tf_export("__internal__.ops.is_resource_variable", v1=[]) +def is_resource_variable(var): + """"Returns True if `var` is to be considered a ResourceVariable.""" + return isinstance(var, BaseResourceVariable) or hasattr( + var, "_should_act_as_resource_variable") + + +def copy_to_graph_uninitialized(var): + """Copies an existing variable to a new graph, with no initializer.""" + # Like ResourceVariable.__deepcopy__, but does not set an initializer on the + # new variable. + # pylint: disable=protected-access + new_variable = UninitializedVariable( + trainable=var.trainable, + constraint=var._constraint, + shape=var.shape, + dtype=var.dtype, + name=var._shared_name, + synchronization=var.synchronization, + aggregation=var.aggregation, + extra_handle_data=var.handle) + new_variable._maybe_initialize_trackable() + # pylint: enable=protected-access + return new_variable + + +ops.NotDifferentiable("Assert") +ops.NotDifferentiable("VarIsInitializedOp") +ops.NotDifferentiable("VariableShape") + + +# TODO(b/246356867): This is the draft implementation. Currently VariableSpec is +# the only class using them. Move them to a separate file when necessary. 
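# --- Editor's illustrative sketch (not part of this patch) -------------------
# A minimal example of how the in-place operator overloads shown earlier in
# this file (`__iadd__`, `__isub__`, `__imul__`, ...) steer users toward the
# assign APIs. All calls below are standard `tf.Variable` methods.
#
#   import tensorflow as tf
#
#   v = tf.Variable(1.0)
#   v.assign_add(1.0)   # in-place update; v now holds 2.0
#   out = v + 1.0       # builds a new tf.Tensor; v itself is unchanged
#   v.assign(v * 3.0)   # supported replacement for `v *= 3.0`
#   # `v += 1.0` would raise the RuntimeError defined in `__iadd__` above.
# ------------------------------------------------------------------------------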
+class StructurePattern: + pass + + +class PLeaf(StructurePattern): + """Represents a singleton leaf StructurePattern.""" + + def __new__(cls): + if not hasattr(cls, "instance"): + cls.instance = super().__new__(cls) + return cls.instance + + +class PList(StructurePattern): + """Represents a list of StructurePatterns.""" + + def __init__(self, *components): + self.components = list(components) + + def __eq__(self, other): + return isinstance(other, PList) and self.components == other.components + + +class VariableSpec(tensor_module.DenseSpec): + """Describes a tf.Variable. + + A `VariableSpec` provides metadata describing the `tf.Variable` objects + accepted or returned by TensorFlow 2.x APIs. + """ + + __slots__ = ["trainable", "alias_id"] + + value_type = property(lambda self: ResourceVariable) + + def __init__(self, shape, dtype=dtypes.float32, trainable=True, + alias_id=None): + super(VariableSpec, self).__init__(shape, dtype=dtype) + self.trainable = trainable + self.alias_id = alias_id + + def is_compatible_with(self, spec_or_value): + """Returns True if `spec_or_value` is compatible with this `VariableSpec`. + + `spec_or_value` is considered to be compatible with this `VariableSpec` if + + * `spec_or_value` is a `Variable` or `VariableSpec`, + * their shapes are compatible, + * their dtypes are the same, + * they are both trainable or not trainable. + * they share the same alias_id if `spec_or_value` is a `VariableSpec`. + + Example: + + >>> v = tf.Variable([1., 2., 3.]) + >>> spec = VariableSpec([None]) + >>> spec.is_compatible_with(v) + True + >>> v = tf.Variable(1) + >>> spec.is_compatible_with(v) + False + + Args: + spec_or_value: A VariableSpec or Variable to compare against. + + Returns: + True if `spec_or_value` is compatible with this `VariableSpec`. + """ + if not isinstance(spec_or_value, (type(self), self.value_type)): + return False + compatible = (self.shape.is_compatible_with(spec_or_value.shape) and + self.dtype == spec_or_value.dtype and + self.trainable == spec_or_value.trainable) + if isinstance(spec_or_value, type(self)): + # alias_id must be the same to be compatible. + return compatible and self.alias_id == spec_or_value.alias_id + return compatible + + @classmethod + def from_value(cls, value): + """Creates a `VariableSpec` from the given `Variable`. + + `value`'s shape, dtype, and trainable attributes will be used to create + the new `VariableSpec`. + + Example: + + >>> v = tf.Variable([1., 2., 3.]) + >>> VariableSpec.from_value(v) + VariableSpec(shape=(3,), dtype=tf.float32, trainable=True, alias_id=None) + + Args: + value: A Variable. + + Returns: + A `VariableSpec` created from `value`. 
+ """ + return cls(value.shape, dtype=value.dtype, trainable=value.trainable) + + def _to_components(self, value): + return [value.handle] + + def _from_components(self, components): + if not isinstance(components, (list, tuple)): + raise TypeError(f"Components of a ResourceVariable must be a list or " + f"tuple, got f{components} instead.") + if len(components) != 1: + raise ValueError(f"Components of a ResourceVariable must only contain " + f"its resource handle, got f{components} instead.") + handle = components[0] + if not isinstance( + handle, tensor_module.Tensor) or handle.dtype != dtypes.resource: + raise ValueError(f"The handle of a ResourceVariable must be a resource " + f"tensor, got {handle} instead.") + return ResourceVariable(trainable=self.trainable, + shape=self.shape, + dtype=self.dtype, + handle=handle) + + @property + def _component_specs(self): + return [ + tensor_module.TensorSpec( + [], + dtypes.DType( + dtypes.resource._type_enum, # pylint: disable=protected-access + dtypes.HandleData(alias_id=self.alias_id), + ), + ) + ] + + def _serialize(self): + return self.shape, self.dtype, self.trainable, self.alias_id + + # TraceType method + def is_subtype_of(self, other): + if type(self) is not type(other): + return False + + # Remove this once we add alias_id to all CompositeTensors with + # ResourceVariable components. + if self.alias_id is None and other.alias_id is None: + return super().is_subtype_of(other) + + if self.alias_id is None or other.alias_id is None: + raise NotImplementedError(f"VariableSpec.is_subtype_of doesn't support " + f"alias_id=None, got self: {self} and other: " + f"{other}.") + + return super().is_subtype_of(other) + + # TraceType method + def most_specific_common_supertype(self, others): + if any(type(self) is not type(other) for other in others): + return None + + # It is a special case for tf.nest, which often takes CompositeTensors and + # converts to TypeSpecs internally, such as tf.nest.assert_same_structure. + if (self.alias_id is None and + all(other.alias_id is None for other in others)): + return super().most_specific_common_supertype(others) + + if self.alias_id is None or any(other.alias_id is None for other in others): + raise NotImplementedError(f"VariableSpec.most_specific_common_supertype " + f"doesn't support alias_id=None, got self: " + f"{self} and others: {others}.") + + return super().most_specific_common_supertype(others) + + # TraceType method + def placeholder_value(self, placeholder_context): + if placeholder_context.unnest_only: + return self + + name = self.name or placeholder_context.naming_scope + context_graph = placeholder_context.context_graph + if placeholder_context.has_placeholder(self.alias_id): + # Get reference to the existing variable if alias_id already + # exists in the PlaceholderContext + variable = placeholder_context.get_placeholder(self.alias_id) + else: + spec = tensor_module.TensorSpec([], dtypes.resource) + spec_context = trace_type.InternalPlaceholderContext( + context_graph.outer_graph) + spec_context.update_naming_scope(name) + placeholder = spec.placeholder_value(spec_context) + variable = self._from_components([placeholder]) + # (b/262771247) ShardedVariable break without this and VariableSpecs + # without alias_id are not TraceTypes. + if self.alias_id is not None: + placeholder_context.add_placeholder(self.alias_id, variable) + # Capture the Variable's placeholder within the default graph of + # the current thread. 
+ placeholder = context_graph.capture(variable.handle, name=name) + placeholder.op._set_attr( # pylint: disable=protected-access + "_user_specified_name", + attr_value_pb2.AttrValue(s=compat.as_bytes(name))) + return variable + + def to_tensors(self, value): + assert isinstance(value, BaseResourceVariable) + variable_accessed(value) + return [value.handle] + + def cast(self, value, _): + assert isinstance(value, BaseResourceVariable) + return value + + def _get_structure(self): + # shape, dtype, trainable, and alias_id are all leaves. + return PList(PLeaf(), PLeaf(), PLeaf(), PLeaf()) + + def __repr__(self): + return (f"{type(self).__name__}(shape={self.shape}, dtype={self.dtype!r}, " + f"trainable={self.trainable!r}, alias_id={self.alias_id!r})") + + def __hash__(self): + return hash((self.shape, self.dtype, self.trainable, self.alias_id)) + + def __eq__(self, other): + return (type(self) is type(other) and self.shape == other.shape and + self.dtype == other.dtype and self.trainable == other.trainable and + self.alias_id == other.alias_id) + + +nested_structure_coder.register_codec( + nested_structure_coder.BuiltInTypeSpecCodec( + VariableSpec, struct_pb2.TypeSpecProto.VARIABLE_SPEC + ) +) + + +def write_object_proto_for_resource_variable(resource_variable, + proto, + options, + enforce_naming=True): + """Writes additional information of the variable into the SavedObject proto. + + This allows users to define a `hook` to provide extra information of the + variable to the SavedObject. + + For example, DistributedVariable class would fill in components in the + distributed context. + + Args: + resource_variable: A `ResourceVariable` or `DistributedValue` that has the + information to be saved into the proto. + proto: `SavedObject` proto to update. + options: A `SaveOption` instance that configures save behavior. + enforce_naming: A bool determining whether to check that names end in the + expected string ':0' + """ + proto.variable.SetInParent() + if enforce_naming and not resource_variable.name.endswith(":0"): + raise ValueError(f"Cowardly refusing to save variable " + f"{resource_variable.name} because of " + f"unexpected suffix in the name (expected ':0')" + f"which won't be restored.") + proto.variable.name = tensor_module.get_op_name(resource_variable.name) + proto.variable.trainable = resource_variable.trainable + proto.variable.dtype = resource_variable.dtype.as_datatype_enum + proto.variable.synchronization = resource_variable.synchronization.value + proto.variable.aggregation = resource_variable.aggregation.value + proto.variable.shape.CopyFrom(resource_variable.shape.as_proto()) + if options.experimental_variable_policy._save_variable_devices( # pylint: disable=protected-access + ): + if hasattr(resource_variable, "device"): + proto.variable.device = resource_variable.device diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/special_math_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/special_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..6ee429a6783e27ecc9e267256bb0ded2e4f09b70 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/special_math_ops.py @@ -0,0 +1,1341 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Arithmetic Operations that don't fit into math_ops due to dependencies. + +To avoid circular dependencies, some math_ops should go here. +""" + +import collections +import functools +import re +import string + +import numpy as np +import opt_einsum + + +from tensorflow.compiler.tf2xla.ops import gen_xla_ops +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_linalg_ops +from tensorflow.python.ops import gen_special_math_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +# TODO(b/27419586) Change docstring for required dtype of x once int allowed +@tf_export('math.lbeta', v1=['math.lbeta', 'lbeta']) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints('lbeta') +def lbeta(x, name=None): + r"""Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension. + + Given one-dimensional $z = [z_1,...,z_K]$, we define + + $$Beta(z) = \frac{\prod_j \Gamma(z_j)}{\Gamma(\sum_j z_j)},$$ + + where $\Gamma$ is the gamma function. + + And for $n + 1$ dimensional $x$ with shape $[N_1, ..., N_n, K]$, we define + + $$lbeta(x)[i_1, ..., i_n] = \log{|Beta(x[i_1, ..., i_n, :])|}.$$ + + In other words, the last dimension is treated as the $z$ vector. + + Note that if $z = [u, v]$, then + + $$Beta(z) = \frac{\Gamma(u)\Gamma(v)}{\Gamma(u + v)} + = \int_0^1 t^{u-1} (1 - t)^{v-1} \mathrm{d}t,$$ + + which defines the traditional bivariate beta function. + + If the last dimension is empty, we follow the convention that the sum over + the empty set is zero, and the product is one. + + Args: + x: A rank `n + 1` `Tensor`, `n >= 0` with type `float`, or `double`. + name: A name for the operation (optional). + + Returns: + The logarithm of \\(|Beta(x)|\\) reducing along the last dimension. + """ + # In the event that the last dimension has zero entries, we return -inf. + # This is consistent with a convention that the sum over the empty set 0, and + # the product is 1. + # This is standard. See https://en.wikipedia.org/wiki/Empty_set. + with ops.name_scope(name, 'lbeta', [x]): + x = ops.convert_to_tensor(x, name='x') + + # Note reduce_sum([]) = 0. 
+ log_prod_gamma_x = math_ops.reduce_sum(math_ops.lgamma(x), axis=[-1]) + + # Note lgamma(0) = infinity, so if x = [] + # log_gamma_sum_x = lgamma(0) = infinity, and + # log_prod_gamma_x = lgamma(1) = 0, + # so result = -infinity + sum_x = math_ops.reduce_sum(x, axis=[-1]) + log_gamma_sum_x = math_ops.lgamma(sum_x) + result = log_prod_gamma_x - log_gamma_sum_x + + return result + + +@tf_export('math.special.dawsn') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def dawsn(x, name=None): + """Computes Dawson's integral of `x` element-wise. + + Dawson's integral is defined as `exp(-x**2)` times the integral of + `exp(t**2)` from `0` to `x`, with the domain of definition all real numbers. + + Dawson's function is odd. + >>> tf.math.special.dawsn([-1., -0.5, 0.5, 1.]).numpy() + array([-0.5380795, -0.4244364, 0.4244364, 0.5380795], dtype=float32) + + This implementation is based off of the Cephes math library. + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.dawsn + @end_compatibility + """ + with ops.name_scope(name, 'dawsn', [x]): + return gen_special_math_ops.dawsn(x) + + +@tf_export('math.special.expint') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def expint(x, name=None): + """Computes the Exponential integral of `x` element-wise. + + The Exponential integral is defined as the integral of `exp(t) / t` from + `-inf` to `x`, with the domain of definition all positive real numbers. + + >>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy() + array([ 1.8951179, 2.1673784, 5.3332353, 21.048464], dtype=float32) + + This implementation is based off of the Cephes math library. + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.expi + @end_compatibility + """ + with ops.name_scope(name, 'expint', [x]): + return gen_special_math_ops.expint(x) + + +@tf_export('math.special.fresnel_cos') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def fresnel_cos(x, name=None): + """Computes Fresnel's cosine integral of `x` element-wise. + + The Fresnel cosine integral is defined as the integral of `cos(t^2)` from + `0` to `x`, with the domain of definition all real numbers. + + The Fresnel cosine integral is odd. + >>> tf.math.special.fresnel_cos([-1., -0.1, 0.1, 1.]).numpy() + array([-0.7798934 , -0.09999753, 0.09999753, 0.7798934 ], dtype=float32) + + This implementation is based off of the Cephes math library. + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.fresnel second output. + @end_compatibility + """ + with ops.name_scope(name, 'fresnel_cos', [x]): + return gen_special_math_ops.fresnel_cos(x) + + +@tf_export('math.special.fresnel_sin') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def fresnel_sin(x, name=None): + """Computes Fresnel's sine integral of `x` element-wise. 
+ + The Fresnel sine integral is defined as the integral of `sin(t^2)` from + `0` to `x`, with the domain of definition all real numbers. + + >>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy() + array([-0.43825912, -0.00052359, 0.00052359, 0.43825912], dtype=float32) + + This implementation is based off of the Cephes math library. + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.fresnel first output. + @end_compatibility + """ + with ops.name_scope(name, 'fresnel_sin', [x]): + return gen_special_math_ops.fresnel_sin(x) + + +@tf_export('math.special.spence') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def spence(x, name=None): + """Computes Spence's integral of `x` element-wise. + + Spence's integral is defined as the integral of `log(t) / (1 - t)` from + `1` to `x`, with the domain of definition all non-negative real numbers. + + >>> tf.math.special.spence([0.5, 1., 2., 3.]).numpy() + array([ 0.58224034, 0. , -0.82246685, -1.4367464], dtype=float32) + + This implementation is based off of the Cephes math library. + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.spence + @end_compatibility + """ + with ops.name_scope(name, 'spence', [x]): + return gen_special_math_ops.spence(x) + + +@tf_export('math.bessel_i0', 'math.special.bessel_i0') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_i0(x, name=None): + """Computes the Bessel i0 function of `x` element-wise. + + Modified Bessel function of order 0. + + It is preferable to use the numerically stabler function `i0e(x)` instead. + + >>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy() + array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.i0 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_i0', [x]): + return gen_special_math_ops.bessel_i0(x) + + +@tf_export('math.bessel_i0e', 'math.special.bessel_i0e') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_i0e(x, name=None): + """Computes the Bessel i0e function of `x` element-wise. + + Modified Bessel function of order 0. + + >>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy() + array([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. 
+ + @compatibility(scipy) + Equivalent to scipy.special.i0e + @end_compatibility + """ + with ops.name_scope(name, 'bessel_i0e', [x]): + return gen_special_math_ops.bessel_i0e(x) + + +@tf_export('math.bessel_i1', 'math.special.bessel_i1') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_i1(x, name=None): + """Computes the Bessel i1 function of `x` element-wise. + + Modified Bessel function of order 1. + + It is preferable to use the numerically stabler function `i1e(x)` instead. + + >>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy() + array([-0.5651591 , -0.25789431, 0.25789431, 0.5651591 ], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.i1 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_i1', [x]): + return gen_special_math_ops.bessel_i1(x) + + +@tf_export('math.bessel_i1e', 'math.special.bessel_i1e') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_i1e(x, name=None): + """Computes the Bessel i1e function of `x` element-wise. + + Modified Bessel function of order 1. + + >>> tf.math.special.bessel_i1e([-1., -0.5, 0.5, 1.]).numpy() + array([-0.20791042, -0.15642083, 0.15642083, 0.20791042], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.i1e + @end_compatibility + """ + with ops.name_scope(name, 'bessel_i1e', [x]): + return gen_special_math_ops.bessel_i1e(x) + + +@tf_export('math.special.bessel_k0') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_k0(x, name=None): + """Computes the Bessel k0 function of `x` element-wise. + + Modified Bessel function of order 0. + + It is preferable to use the numerically stabler function `k0e(x)` instead. + + >>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy() + array([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.k0 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_k0', [x]): + return gen_special_math_ops.bessel_k0(x) + + +@tf_export('math.special.bessel_k0e') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_k0e(x, name=None): + """Computes the Bessel k0e function of `x` element-wise. + + Modified Bessel function of order 0. + + >>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy() + array([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. 
+ + @compatibility(scipy) + Equivalent to scipy.special.k0e + @end_compatibility + """ + with ops.name_scope(name, 'bessel_k0e', [x]): + return gen_special_math_ops.bessel_k0e(x) + + +@tf_export('math.special.bessel_k1') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_k1(x, name=None): + """Computes the Bessel k1 function of `x` element-wise. + + Modified Bessel function of order 1. + + It is preferable to use the numerically stabler function `k1e(x)` instead. + + >>> tf.math.special.bessel_k1([0.5, 1., 2., 4.]).numpy() + array([1.65644112, 0.60190723, 0.13986588, 0.0124835 ], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.k1 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_k1', [x]): + return gen_special_math_ops.bessel_k1(x) + + +@tf_export('math.special.bessel_k1e') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_k1e(x, name=None): + """Computes the Bessel k1e function of `x` element-wise. + + Modified Bessel function of order 1. + + >>> tf.math.special.bessel_k1e([0.5, 1., 2., 4.]).numpy() + array([2.73100971, 1.63615349, 1.03347685, 0.68157595], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.k1e + @end_compatibility + """ + with ops.name_scope(name, 'bessel_k1e', [x]): + return gen_special_math_ops.bessel_k1e(x) + + +@tf_export('math.special.bessel_j0') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_j0(x, name=None): + """Computes the Bessel j0 function of `x` element-wise. + + Bessel function of the first kind of order 0. + + >>> tf.math.special.bessel_j0([0.5, 1., 2., 4.]).numpy() + array([ 0.93846981, 0.76519769, 0.22389078, -0.39714981], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.j0 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_j0', [x]): + return gen_special_math_ops.bessel_j0(x) + + +@tf_export('math.special.bessel_j1') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_j1(x, name=None): + """Computes the Bessel j1 function of `x` element-wise. + + Bessel function of the first kind of order 1. + + >>> tf.math.special.bessel_j1([0.5, 1., 2., 4.]).numpy() + array([ 0.24226846, 0.44005059, 0.57672481, -0.06604333], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
+ + @compatibility(scipy) + Equivalent to scipy.special.j1 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_j1', [x]): + return gen_special_math_ops.bessel_j1(x) + + +@tf_export('math.special.bessel_y0') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_y0(x, name=None): + """Computes the Bessel y0 function of `x` element-wise. + + Modified Bessel function of order 0. + + >>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy() + array([-0.44451873, 0.08825696, 0.51037567, -0.01694074], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.y0 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_y0', [x]): + return gen_special_math_ops.bessel_y0(x) + + +@tf_export('math.special.bessel_y1') +@dispatch.register_unary_elementwise_api +@dispatch.add_dispatch_support +def bessel_y1(x, name=None): + """Computes the Bessel y1 function of `x` element-wise. + + Modified Bessel function of order 1. + + >>> tf.math.special.bessel_y1([0.5, 1., 2., 4.]).numpy() + array([-1.47147239, -0.78121282, -0.10703243, 0.39792571], dtype=float32) + + Args: + x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, + `float32`, `float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. + + @compatibility(scipy) + Equivalent to scipy.special.y1 + @end_compatibility + """ + with ops.name_scope(name, 'bessel_y1', [x]): + return gen_special_math_ops.bessel_y1(x) + + +@ops.RegisterGradient('XlaEinsum') +def _einsum_grad(op, grad): + equation = op.get_attr('equation') + if isinstance(equation, bytes): + equation = equation.decode() + + inputs, output = equation.split('->') + left, right = inputs.split(',') + + return [ + gen_xla_ops.xla_einsum( + grad, + op.inputs[1], + equation='{},{}->{}'.format(output, right, left), + name=None), + gen_xla_ops.xla_einsum( + grad, + op.inputs[0], + equation='{},{}->{}'.format(output, left, right), + name=None) + ] + + +def _enclosing_tpu_context(): + # pylint: disable=protected-access + context = ops.get_default_graph()._get_control_flow_context() + # pylint: enable=protected-access + while context is not None and not isinstance( + context, control_flow_ops.XLAControlFlowContext): + context = context.outer_context + return context + + +@tf_export('einsum', 'linalg.einsum') +@dispatch.add_dispatch_support +def einsum(equation, *inputs, **kwargs): + r"""Tensor contraction over specified indices and outer product. + + Einsum allows defining Tensors by defining their element-wise computation. + This computation is defined by `equation`, a shorthand form based on Einstein + summation. As an example, consider multiplying two matrices A and B to form a + matrix C. The elements of C are given by: + + $$ C_{i,k} = \sum_j A_{i,j} B_{j,k} $$ + + or + + ``` + C[i,k] = sum_j A[i,j] * B[j,k] + ``` + + The corresponding einsum `equation` is: + + ``` + ij,jk->ik + ``` + + In general, to convert the element-wise equation into the `equation` string, + use the following procedure (intermediate strings for matrix multiplication + example provided in parentheses): + + 1. remove variable names, brackets, and commas, (`ik = sum_j ij * jk`) + 2. replace "*" with ",", (`ik = sum_j ij , jk`) + 3. 
drop summation signs, and (`ik = ij, jk`) + 4. move the output to the right, while replacing "=" with "->". (`ij,jk->ik`) + + Note: If the output indices are not specified repeated indices are summed. + So `ij,jk->ik` can be simplified to `ij,jk`. + + Many common operations can be expressed in this way. For example: + + **Matrix multiplication** + + >>> m0 = tf.random.normal(shape=[2, 3]) + >>> m1 = tf.random.normal(shape=[3, 5]) + >>> e = tf.einsum('ij,jk->ik', m0, m1) + >>> # output[i,k] = sum_j m0[i,j] * m1[j, k] + >>> print(e.shape) + (2, 5) + + Repeated indices are summed if the output indices are not specified. + + >>> e = tf.einsum('ij,jk', m0, m1) # output[i,k] = sum_j m0[i,j] * m1[j, k] + >>> print(e.shape) + (2, 5) + + + **Dot product** + + >>> u = tf.random.normal(shape=[5]) + >>> v = tf.random.normal(shape=[5]) + >>> e = tf.einsum('i,i->', u, v) # output = sum_i u[i]*v[i] + >>> print(e.shape) + () + + **Outer product** + + >>> u = tf.random.normal(shape=[3]) + >>> v = tf.random.normal(shape=[5]) + >>> e = tf.einsum('i,j->ij', u, v) # output[i,j] = u[i]*v[j] + >>> print(e.shape) + (3, 5) + + **Transpose** + + >>> m = tf.ones(2,3) + >>> e = tf.einsum('ij->ji', m0) # output[j,i] = m0[i,j] + >>> print(e.shape) + (3, 2) + + **Diag** + + >>> m = tf.reshape(tf.range(9), [3,3]) + >>> diag = tf.einsum('ii->i', m) + >>> print(diag.shape) + (3,) + + **Trace** + + >>> # Repeated indices are summed. + >>> trace = tf.einsum('ii', m) # output[j,i] = trace(m) = sum_i m[i, i] + >>> assert trace == sum(diag) + >>> print(trace.shape) + () + + **Batch matrix multiplication** + + >>> s = tf.random.normal(shape=[7,5,3]) + >>> t = tf.random.normal(shape=[7,3,2]) + >>> e = tf.einsum('bij,bjk->bik', s, t) + >>> # output[a,i,k] = sum_j s[a,i,j] * t[a, j, k] + >>> print(e.shape) + (7, 5, 2) + + This method does not support broadcasting on named-axes. All axes with + matching labels should have the same length. If you have length-1 axes, + use `tf.squeeze` or `tf.reshape` to eliminate them. + + To write code that is agnostic to the number of indices in the input + use an ellipsis. The ellipsis is a placeholder for "whatever other indices + fit here". + + For example, to perform a NumPy-style broadcasting-batch-matrix multiplication + where the matrix multiply acts on the last two axes of the input, use: + + >>> s = tf.random.normal(shape=[11, 7, 5, 3]) + >>> t = tf.random.normal(shape=[11, 7, 3, 2]) + >>> e = tf.einsum('...ij,...jk->...ik', s, t) + >>> print(e.shape) + (11, 7, 5, 2) + + Einsum **will** broadcast over axes covered by the ellipsis. + + >>> s = tf.random.normal(shape=[11, 1, 5, 3]) + >>> t = tf.random.normal(shape=[1, 7, 3, 2]) + >>> e = tf.einsum('...ij,...jk->...ik', s, t) + >>> print(e.shape) + (11, 7, 5, 2) + + Args: + equation: a `str` describing the contraction, in the same format as + `numpy.einsum`. + *inputs: the inputs to contract (each one a `Tensor`), whose shapes should + be consistent with `equation`. + **kwargs: + - optimize: Optimization strategy to use to find contraction path using + opt_einsum. Must be 'greedy', 'optimal', 'branch-2', 'branch-all' or + 'auto'. (optional, default: 'greedy'). + - name: A name for the operation (optional). + + Returns: + The contracted `Tensor`, with shape determined by `equation`. + + Raises: + ValueError: If + - the format of `equation` is incorrect, + - number of inputs or their shapes are inconsistent with `equation`. 
+ """ + return _einsum_v2(equation, *inputs, **kwargs) + + +def _einsum_v1(equation, *inputs, **kwargs): + """Legacy implementation of einsum without using EinsumOp.""" + name = kwargs.pop('name', None) + if kwargs: + raise TypeError( + f'Invalid keyword arguments for this function: ' + f'{", ".join([format(key) for key in sorted(list(kwargs.keys()))])}.' + f' Expected: name.') + with ops.name_scope(name, 'einsum', [equation, inputs]) as name: + inputs = list(inputs) + input_shapes = [x.shape for x in inputs] + input_axis_labels, output_axis_labels = ( + _einsum_v1_parse_and_resolve_equation(equation, input_shapes)) + + axis_labels = set(''.join(input_axis_labels) + output_axis_labels) + + for a in axis_labels: + for input_labels in input_axis_labels: + if (len(input_axis_labels) == 1 and input_labels.count(a) == 2 and + input_labels == input_labels[::-1] and '->' not in equation): + return math_ops.trace(inputs[0]) + if input_labels.count(a) > 1: + raise ValueError( + f'Subscript not supported: the axis {a} appears more than once' + f' in {input_labels}.') + for a in axis_labels: + input_count = sum(1 for s in input_axis_labels if a in s) + if input_count > 2 and a not in output_axis_labels: + logging.warn( + f'Falling back to exponential-space implementation of einsum()' + f' because index {a} is summed over more than two inputs.') + return _exponential_space_einsum_v1(equation, *inputs) + + # Use xla_einsum if executing on TPU and if the operation is a 2 input + # einsum supported by XlaEinsumOp. + if _enclosing_tpu_context() is not None and len(inputs) == 2: + return gen_xla_ops.xla_einsum( + inputs[0], inputs[1], input_axis_labels[0] + ',' + + input_axis_labels[1] + '->' + output_axis_labels) + temp = inputs[0] + temp_axis_labels = input_axis_labels[0] + for i in range(len(inputs) - 1): + axes_to_sum = ( + set(temp_axis_labels) & + set(input_axis_labels[i + 1]) - set(output_axis_labels)) + temp, temp_axis_labels = _einsum_v1_reduction(temp, temp_axis_labels, + inputs[i + 1], + input_axis_labels[i + 1], + axes_to_sum) + + missing_indices = set(temp_axis_labels) - set(output_axis_labels) + if missing_indices: + axis = [ + i for i, a in enumerate(temp_axis_labels) + if a not in output_axis_labels + ] + temp = math_ops.reduce_sum(temp, axis=axis) + temp_axis_labels = ''.join( + a for a in temp_axis_labels if a in output_axis_labels) + if sorted(temp_axis_labels) != sorted(output_axis_labels): + raise ValueError( + f'Invalid equation: {equation}. The computed and specified output ' + f'labels do not match: {temp_axis_labels} vs {output_axis_labels}.') + + perm = [temp_axis_labels.index(a) for a in output_axis_labels] + return _transpose_if_necessary(temp, perm) + + +def _einsum_v1_parse_and_resolve_equation(equation, input_shapes): + """Helper for einsum() that splits/resolves inputs & outputs. + + Args: + equation: Equation string given as argument to einsum(). + input_shapes: List of the shapes of all inputs given to einsum() + + Returns: + input_axis_labels, output_axis_labels where: + input_axis_labels: List of length len(input_shapes) of strings + representing the character label for each dimension of each given input, + resolving any broadcast (...) axes, + output_axis_labels: A string of character labels for each axes of output + tensor, filling in missing output subscripts and broadcast axes. + + Raises: + ValueError: If equation is in the uncorrect format, incorrect number of + inputs given or broadcast axes "..." or output axes could not be resolved. 
+ """ + equation = equation.replace(' ', '') + match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation) + if not match: + raise ValueError(f'Indices have incorrect format. Received: {equation}.') + + input_axis_labels = match.group(1).split(',') + output_axis_labels = match.group(2)[2:] if match.group(2) else None + + if len(input_shapes) != len(input_axis_labels): + raise ValueError( + f'Got {len(input_shapes)} arguments for equation "{equation}", ' + f'expecting {len(input_axis_labels)}.') + + # Resolve Ellipsis + # Assign axes labels for unspecified dimensions in inputs. Labels taken + # from unused labels. Follow numpy einsum broadcasting conventions for + # tensors of different length and unlabeled output. + ellipsis_axes = '' + if '...' in equation: + unused = ''.join( + c for c in string.ascii_letters if c not in ''.join(input_axis_labels)) + for i, ax in enumerate(input_axis_labels): + if '...' in ax: + parts = ax.split('...') + if len(parts) != 2: + raise ValueError(f'Unable to resolve ellipsis. ' + f'Excess number found: {len(parts)-1} vs 1.') + if input_shapes[i].ndims is None: + raise ValueError('Unable to statically infer ellipsis axes. The ' + 'input shapes has a dynamic dimensionality.') + n = input_shapes[i].ndims - len(''.join(parts)) + if n < 0: + raise ValueError('Ellipses lengths do not match.') + if len(unused) < n: + raise ValueError( + 'Unable to resolve ellipsis, too many distinct labels.') + replace_axes = unused[-n:] if n > 0 else '' + input_axis_labels[i] = input_axis_labels[i].replace('...', + replace_axes) + if len(replace_axes) > len(ellipsis_axes): + ellipsis_axes = replace_axes + + if any('.' in ax for ax in input_axis_labels): + raise ValueError( + f'Period "." found outside of ellipsis in input {input_axis_labels}.') + + if output_axis_labels is not None: + output_axis_labels = output_axis_labels.replace('...', ellipsis_axes) + if '.' in output_axis_labels: + raise ValueError(f'Period "." found outside of ellipsis in output ' + f'{output_axis_labels}.') + + if output_axis_labels is None: + # infer the output subscripts if not given, assume alphabetical order, + # but always place ellipsis axes before given. + axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes) + indices = ''.join(sorted(axis_labels)) + counts = {ax: 0 for ax in indices} + for axes_ in input_axis_labels: + for ax in axes_: + if ax not in ellipsis_axes: + counts[ax] += 1 + + output_axis_labels = ellipsis_axes + ''.join( + sorted(ax for ax in axis_labels if counts[ax] == 1)) + + return input_axis_labels, output_axis_labels + + +def _einsum_v1_reduction(t0, t0_axis_labels, t1, t1_axis_labels, axes_to_sum): + """Helper for einsum() that computes the result of a two-argument einsum(). + + Args: + t0: a `Tensor` + t0_axis_labels: a string of axis labels. This string's length must equal + the rank of t0. + t1: a `Tensor` + t1_axis_labels: a string to axis labels. This string's length must equal + the rank of t1. + axes_to_sum: set of labels of axes to be summed over + + Returns: + A `Tensor` whose elements are obtained by summing, over all axes in + `axes_to_sum`, the corresponding elements of `t0` and `t1`. + + For example, if t0_axis_labels == 'abijk', t1_axis_labels == 'acjkl', and + axes_to_sum == {j,k}, this will return a tensor x where + + out[a,b,c,i,l] = sum_j sum_k t0[a,b,i,j,k] * t1[a,c,j,k,l] + + Raises: + ValueError: if the rank of `t0` does not match the length of + `t0_axis_labels`, or that of `t1` does not match the length of + `t1_axis_labels`. 
+ """ + if len(t0_axis_labels) != len(t0.shape): + raise ValueError( + f'Tensor `t0` of rank {len(t0.shape)} does not match einsum reduction ' + f'of length {len(t0_axis_labels)}.') + if len(t1_axis_labels) != len(t1.shape): + raise ValueError( + f'Tensor `t1` of rank {len(t1.shape)} does not match einsum reduction ' + f'of length {len(t1_axis_labels)}') + + # This function computes the result of a two-argument einsum() using batch + # matrix multiplication. This involves + # 1. transposing t0 and t1 so that axes are in the correct order for + # batch matrix multiplication, and + # 2. reshaping t0 and t1 so that they are both of rank 3. + + # First, we divide axes into three groups: + # * "preserved" axes are present in both inputs and the output + # * "summed" axes are present in both inputs but not the output + # * "broadcast" axes are present in exactly one input and the output + # + # As an example, if the einsum is abijk,acjkl->abcil, then "a" is a + # preserved axis, "b" and "c" are broadcast axes, and "j" and "k" are + # summed axes. + assert all(a in t0_axis_labels and a in t1_axis_labels for a in axes_to_sum) + preserved_axes = (set(t0_axis_labels) & set(t1_axis_labels)) - axes_to_sum + broadcast_axes = {} + for i, sym_list in enumerate([t0_axis_labels, t1_axis_labels]): + broadcast_axes[i] = set(sym_list) - preserved_axes - axes_to_sum + + # Reorder the axes so that: + # 1. preserved axes come first in both inputs + # 2. in input 0, broadcast axes come next, followed by summed axes + # 3. in input 1, summed axes come next, followed by broadcast axes + def sort_key(input_index, a): + if a in preserved_axes: + return (-1, a) + elif ((input_index == 0 and a in broadcast_axes[0]) or + (input_index == 1 and a in axes_to_sum)): + return (0, a) + else: + return (1, a) + + axis_labels = [t0_axis_labels, t1_axis_labels] + sorted_axes = [ + sorted(sym_list, key=lambda a: sort_key(i, a)) + for i, sym_list in enumerate(axis_labels) + ] + inputs = [t0, t1] + for i, axes_str in enumerate(axis_labels): + perm = [axes_str.find(a) for a in sorted_axes[i]] + inputs[i] = _transpose_if_necessary(inputs[i], perm) + t0, t1 = inputs + + if not axes_to_sum: + # In the special case where there are no axes to sum over, reduce to mul() + # rather than to batch matrix multiplication. + for _ in broadcast_axes[1]: + t0 = array_ops.expand_dims(t0, -1) + for _ in broadcast_axes[0]: + t1 = array_ops.expand_dims(t1, len(preserved_axes)) + product = math_ops.multiply(t0, t1) + product_axes = sorted_axes[0] + sorted_axes[1][len(preserved_axes):] + return product, ''.join(product_axes) + else: + # Reduce to matmul(). + + # Reshape both inputs so as to combine multiple broadcast axes + # into a single axis, and combine multiple summed axes into a + # single axis. 
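# Worked example (hypothetical shapes, continuing the 'abijk,acjkl->abcil'
# case from the comments above): with t0 of shape [2, 3, 4, 5, 6] (labels
# 'abijk') and t1 of shape [2, 7, 5, 6, 8] (labels 'acjkl'), t1 is first
# transposed to label order 'ajkcl', i.e. shape [2, 5, 6, 7, 8]. The reshapes
# below then compact t0 to [2, 3*4, 5*6] and t1 to [2, 5*6, 7*8], the batch
# matmul produces [2, 3*4, 7*8], and the result is un-compacted back to
# [2, 3, 4, 7, 8] with labels 'abicl' (the final transpose to the requested
# output order happens later, in _einsum_v1).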
+ + t0_shape = _get_shape(t0) + num_broadcast_elements_t0 = _total_size( + t0_shape[len(preserved_axes):-len(axes_to_sum)]) + num_summed_elements = _total_size(t0_shape[-len(axes_to_sum):]) + new_shape = ( + t0_shape[:len(preserved_axes)] + + [num_broadcast_elements_t0, num_summed_elements]) + t0 = _reshape_if_necessary(t0, new_shape) + + t1_shape = _get_shape(t1) + num_broadcast_elements_t1 = _total_size( + t1_shape[len(preserved_axes) + len(axes_to_sum):]) + new_shape = ( + t1_shape[:len(preserved_axes)] + + [num_summed_elements, num_broadcast_elements_t1]) + t1 = _reshape_if_necessary(t1, new_shape) + + product = math_ops.matmul(t0, t1) + + # Undo compaction of broadcast axes + uncompacted_shape = ( + t0_shape[:len(preserved_axes) + len(broadcast_axes[0])] + + t1_shape[len(t1_shape) - len(broadcast_axes[1]):]) + product = _reshape_if_necessary(product, uncompacted_shape) + + product_axes = ( + sorted_axes[0][:len(preserved_axes) + len(broadcast_axes[0])] + + sorted_axes[1][len(sorted_axes[1]) - len(broadcast_axes[1]):]) + + return product, ''.join(product_axes) + + +def _transpose_if_necessary(tensor, perm): + """Like transpose(), but avoids creating a new tensor if possible.""" + if perm != list(range(len(perm))): + return array_ops.transpose(tensor, perm=perm) + else: + return tensor + + +def _reshape_if_necessary(tensor, new_shape): + """Like reshape(), but avoids creating a new tensor if possible.""" + # Accept None as an alias for -1 in new_shape. + new_shape = tuple(-1 if x is None else x for x in new_shape) + cur_shape = tuple(x.value for x in tensor.shape.dims) + if (len(new_shape) == len(cur_shape) and + all(not isinstance(d1, tensor_lib.Tensor) and (d0 == d1 or d1 == -1) + for d0, d1 in zip(cur_shape, new_shape))): + return tensor + else: + return array_ops.reshape(tensor, new_shape) + + +def _get_shape(tensor): + """Like get_shape().as_list(), but explicitly queries the shape of a tensor + if necessary to ensure that the returned value contains no unknown value.""" + + shape = tensor.shape.as_list() + none_indices = [i for i, d in enumerate(shape) if d is None] + if none_indices: + # Query the shape if shape contains None values + shape_tensor = array_ops.shape(tensor) + for i in none_indices: + shape[i] = shape_tensor[i] + return shape + + +def _total_size(shape_values): + """Given list of tensor shape values, returns total size. + If shape_values contains tensor values (which are results of + array_ops.shape), then it returns a scalar tensor. 
+ If not, it returns an integer.""" + + result = 1 + for val in shape_values: + result *= val + return result + + +def _exponential_space_einsum_v1(equation, *inputs): + """Fallback implementation that supports summing an index over > 2 inputs.""" + inputs = list(inputs) + input_shapes = [x.shape for x in inputs] + idx_in, idx_out = _einsum_v1_parse_and_resolve_equation( + equation, input_shapes) + + idx_all = set(''.join(idx_in) + idx_out) + indices = ''.join(sorted(idx_all)) + + missing_idx = set(idx_out).difference(idx_all) + if missing_idx: + raise ValueError(f'Unknown output axes: {missing_idx}.') + + axis_order = {} + for ax in indices: + if ax not in idx_out: + axis_order[ax] = len(axis_order) + for ax in idx_out: + axis_order[ax] = len(axis_order) + + # transpose inputs so axes are in order + for i, (input_, axes_) in enumerate(zip(inputs, idx_in)): + if input_.shape.ndims != len(axes_): + raise ValueError( + f'Input {i} with axes {axes_} has incorrect number of dimensions ' + f'(expected {len(axes_)}, got {input_.shape.ndims}).') + + sorted_idx = sorted(axes_, key=axis_order.get) + + if len(set(axes_)) != len(axes_): + raise ValueError( + f'Subscript not supported: an axis appears more than once: {axes_}.') + + if list(axes_) != sorted_idx: + permuted = [axes_.find(ax) for ax in sorted_idx] + inputs[i] = array_ops.transpose(input_, permuted) + idx_in[i] = sorted_idx + + reduction_idx = [] + shapes = [[dim if dim else -1 + for dim in tensor.shape.as_list()] + for tensor in inputs] + + # validate shapes for broadcasting + for j, ax in enumerate(sorted(idx_all, key=axis_order.get)): + dims = [] + for i, idx in enumerate(idx_in): + if ax not in idx: + shapes[i].insert(j, 1) + else: + dim = shapes[i][j] + if isinstance(dim, int) and dim > 1: + dims.append(dim) + + if len(set(dims)) > 1: + raise ValueError(f'Dimension mismatch on axis: {ax}. ' + f'Found {len(set(dims))}, expected 1.') + + if ax not in idx_out: + reduction_idx.append(j) + + # reshape, multiply + expanded_inputs = [ + array_ops.reshape(input_, shape) for input_, shape in zip(inputs, shapes) + ] + expanded_output = 1 + for input_ in expanded_inputs: + expanded_output *= input_ + + # contract + return math_ops.reduce_sum(expanded_output, reduction_idx) + + +def _einsum_v2(equation, *inputs, **kwargs): + """Implementation of einsum utilizing opt_einsum and EinsumOp.""" + name = kwargs.pop('name', None) + optimize = kwargs.pop('optimize', 'greedy') + if kwargs: + raise TypeError( + f'Invalid keyword arguments for einsum: {", ".join(kwargs)}. ' + f'Valid arguments: name, optimize, greedy.') + + with ops.name_scope(name, 'einsum', [equation, inputs]) as name: + inputs = list(inputs) + input_shapes = [] + for operand in inputs: + if isinstance(operand.shape, tensor_shape.TensorShape): + input_shapes.append(operand.shape.as_list() if operand.shape else None) + else: + input_shapes.append(list(operand.shape)) + # Validate and sanitize the equation and resolve static input shapes, as + # opt_einsum requires that all shapes be a tuple of positive integers. + # Also remove ellipsis from the equation as opt_einsum will replace them + # with named labels. Then broadcasting between different shapes or ranks + # wouldn't work. (E.g. [1, 1, 2] wouldn't broadcast with [3, 1]). + resolved_equation, resolved_input_shapes, ellipsis_label = ( + _einsum_v2_parse_and_resolve_equation(equation, input_shapes)) + + if len(inputs) <= 2: # No need to call opt_einsum. + # Replace back ellipses that were removed for opt_einsum. 
+ if ellipsis_label: + resolved_equation = resolved_equation.replace(ellipsis_label, '...') + return gen_linalg_ops.einsum(inputs, resolved_equation) + + # Send fully specified shapes to opt_einsum, since it cannot handle unknown + # dimensions. For unknown dimensions, we guess that the dimension equals 1. + # Instead of creating Tensors or NumPy arrays with the specified shape, + # create a dummy `shaped` object with a `shape` property. + shaped = collections.namedtuple('shaped', ['shape']) + shaped_inputs = tuple( + [shaped(tuple(shape)) for shape in resolved_input_shapes]) + # opt_einsum breaks down an n-ary einsum operation into n-1 binary einsums. + # Obtain the sequence of equations and the indices of operands involved in + # each einsum operation. + indices_and_equations = _get_opt_einsum_contract_path( + resolved_equation, shaped_inputs, optimize) + for operand_indices, binary_equation in indices_and_equations: + if ellipsis_label: + # Replace back ellipses that were removed for opt_einsum. + binary_equation = binary_equation.replace(ellipsis_label, '...') + operands = list(map(inputs.pop, operand_indices)) + inputs.append(gen_linalg_ops.einsum(operands, binary_equation)) + return inputs[0] + + +def _get_opt_einsum_contract_path(equation, shaped_inputs_tuple, optimize): + """Returns the (memoized) result of opt_einsum.contract_path.""" + # Note: We use einsum_call=True, which is an internal api for opt_einsum, + # to get the contraction path without having opt_einsum perform the actual + # contractions. + _, contractions = opt_einsum.contract_path( + equation, + *shaped_inputs_tuple, + optimize=optimize, + einsum_call=True, + use_blas=True) + # Return a tuple so that the cached value is not mutable. + indices_and_equations = tuple([(expr[0], expr[2]) for expr in contractions]) + return indices_and_equations + + +# Cache the possibly expensive opt_einsum.contract_path call using lru_cache +# from the Python3+ standard library. +_get_opt_einsum_contract_path = functools.lru_cache(maxsize=128)( + _get_opt_einsum_contract_path) + + +def _einsum_v2_parse_and_resolve_equation(equation, input_shapes): + """Helper which validates einsum equation and resolves input shapes.""" + resolved_equation = equation.replace(' ', '') + ellipsis_label = None + if '...' in equation: + # Replace ellipsis ('...') with '0' for (a) ease of parsing and (b) to + # prevent opt_einsum from resolving them into named labels; as it doesn't + # support broadcasting. + ellipsis_label = '0' + if ellipsis_label in resolved_equation: + raise ValueError( + f'Invalid character "{ellipsis_label}" in equation: {equation}.') + resolved_equation = resolved_equation.replace('...', ellipsis_label) + + # Ensure there are no non-alphanumeric characters in the equation, including + # periods (`.`) outside of ellipses, in the equation. This is not a hard + # requirement; except we use a special character '0' for ellipsis. 
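# Illustration of the placeholder scheme described above: an equation such as
# '...ij,jk->...ik' has been rewritten above to '0ij,jk->0ik' so that
# opt_einsum never sees the ellipsis, and _einsum_v2 translates the '0' back
# to '...' in each binary equation before the EinsumOp is actually built.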
+ allowed_labels = 'a-zA-Z' + if ellipsis_label: + allowed_labels += ellipsis_label + match = re.match('^([{0},]*)(->[{0}]*)?$'.format(allowed_labels), + resolved_equation) + if not match: + raise ValueError( + 'Subscripts have incorrect format: {}'.format(resolved_equation)) + input_labels = match.group(1).split(',') + output_labels = match.group(2)[2:] if match.group(2) else None + + if len(input_shapes) != len(input_labels): + raise ValueError('Got {} inputs for equation "{}", expecting {}'.format( + len(input_shapes), equation, len(input_labels))) + + # Special case: if there are no '->', then we create output subscripts from + # labels appearing only once. + if '->' not in resolved_equation: + label_counts = collections.Counter(match.group(1)) + output_labels = ''.join([ + x for x in sorted(list(label_counts)) + if x != ',' and label_counts[x] == 1 + ]) + resolved_equation += '->' + output_labels + # Validate output_labels. + if output_labels and len(set(output_labels)) != len(output_labels): + raise ValueError( + 'Output subscripts contain a label appearing more than once: {}'.format( + equation)) + input_label_set = set(match.group(1)) + for label in output_labels: + if label != ellipsis_label and label not in input_label_set: + raise ValueError('Output subscripts contain the label {} not present ' + 'in the input subscripts.'.format(label)) + if ellipsis_label and output_labels: + num_output_ellipses = output_labels.count(ellipsis_label) + if num_output_ellipses > 1: + raise ValueError( + 'Output subscripts contain multiple ellipsis: {}'.format(equation)) + + # Early return if <= 2 inputs. Resolved shapes are not needed. + if len(input_shapes) <= 2: + return resolved_equation, None, ellipsis_label + + # Create a map from axis labels to known dimensions. This is used to infer + # unknown dimensions if a known dimension also has the same label. + label_to_dim = collections.defaultdict(lambda: 1) + for i, (labels, shape) in enumerate(zip(input_labels, input_shapes)): + if shape is None: + continue + ellipsis_start = labels.find(ellipsis_label) if ellipsis_label else -1 + if ellipsis_start != -1: # This input contains an ellipsis. + if ellipsis_start != labels.rfind(ellipsis_label): + raise ValueError(f'Too many ellipses in input label ' + f'{labels.replace(ellipsis_label, "...")}.') + if len(labels) > len(shape) + 1: + raise ValueError('Too many named labels in {}th subscript string of' + ' equation {} for input shape {} '.format( + i, equation, shape)) + ellipsis_end = ellipsis_start + len(shape) + 1 - len(labels) + shape[ellipsis_start:ellipsis_end] = ([ + np.prod( + list(filter(None, shape[ellipsis_start:ellipsis_end])), + dtype=np.int64) + ]) + else: + # This input does not contain an ellipsis. 
+ if len(labels) != len(shape): + raise ValueError( + 'Number of named labels in input #{} of equation {} ' + 'must be equal to the number of dimensions in shape {}'.format( + i, equation, shape)) + for dim, label in zip(shape, labels): + if dim is not None: + label_to_dim[label] = max(label_to_dim[label], dim) + + resolved_shapes = [] + for labels in input_labels: + resolved_shapes.append([label_to_dim[label] for label in labels]) + return resolved_equation, resolved_shapes, ellipsis_label diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/tensor_math_operator_overrides.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/tensor_math_operator_overrides.py new file mode 100644 index 0000000000000000000000000000000000000000..f94d2a14da8faabbadf32e01d97562ded4711459 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/tensor_math_operator_overrides.py @@ -0,0 +1,168 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Overrides for Tensor operators.""" + + +from tensorflow.python.framework import override_binary_operator +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.util import tf_decorator + + +# pylint: disable=g-import-not-at-top +def _add_dispatch_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops._add_dispatch(x, y, name=name) # pylint: disable=protected-access + + +def _and_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.and_(x, y, name=name) + + +def _div_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.div(x, y, name=name) + + +def _floordiv_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.floordiv(x, y, name=name) + + +def _matmul_factory(a, b, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.matmul_wrapper(a, b, name=name) + + +def _mod_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.mod(x, y, name=name) + + +def _mul_dispatch_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops._mul_dispatch(x, y, name=name) # pylint: disable=protected-access + + +def _or_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.or_(x, y, name=name) + + +def _pow_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.pow(x, y, name=name) + + +def _subtract_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.subtract(x, y, name=name) + + +def _truediv_factory(x, y, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.truediv(x, y, name=name) + + +def _xor_factory(x, y, name=None): + from 
tensorflow.python.ops import math_ops + + return math_ops.xor_(x, y, name=name) + + +override_binary_operator.override_binary_operator_helper( + _add_dispatch_factory, "add" +) +override_binary_operator.override_binary_operator_helper(_and_factory, "and") +override_binary_operator.override_binary_operator_helper(_div_factory, "div") +override_binary_operator.override_binary_operator_helper( + _floordiv_factory, "floordiv" +) +override_binary_operator.override_binary_operator_helper( + _matmul_factory, "matmul" +) +override_binary_operator.override_binary_operator_helper(_mod_factory, "mod") +override_binary_operator.override_binary_operator_helper( + _mul_dispatch_factory, "mul" +) +override_binary_operator.override_binary_operator_helper(_or_factory, "or") +override_binary_operator.override_binary_operator_helper(_pow_factory, "pow") +override_binary_operator.override_binary_operator_helper( + _subtract_factory, "sub" +) +override_binary_operator.override_binary_operator_helper( + _truediv_factory, "truediv" +) +override_binary_operator.override_binary_operator_helper(_xor_factory, "xor") + + +def _invert_factory(x, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.invert_(x, name=name) + + +def _abs_factory(x, name=None): + from tensorflow.python.ops import math_ops + + return math_ops.abs(x, name=name) + + +def _tensor_equals_factory(self, other): + from tensorflow.python.ops import math_ops + + return math_ops.tensor_equals(self, other) + + +def _tensor_not_equals_factory(self, other): + from tensorflow.python.ops import math_ops + + return math_ops.tensor_not_equals(self, other) + + +def _promote_dtypes_decorator(fn): + def wrapper(x, y, *args, **kwargs): + x, y = override_binary_operator.maybe_promote_tensors(x, y) + return fn(x, y, *args, **kwargs) + + return tf_decorator.make_decorator(fn, wrapper) + + +# pylint: disable=protected-access +tensor_lib.Tensor._override_operator("__invert__", _invert_factory) +tensor_lib.Tensor._override_operator("__neg__", gen_math_ops.neg) +tensor_lib.Tensor._override_operator("__abs__", _abs_factory) +tensor_lib.Tensor._override_operator("__lt__", _promote_dtypes_decorator( + gen_math_ops.less)) +tensor_lib.Tensor._override_operator("__le__", _promote_dtypes_decorator( + gen_math_ops.less_equal)) +tensor_lib.Tensor._override_operator("__gt__", _promote_dtypes_decorator( + gen_math_ops.greater)) +tensor_lib.Tensor._override_operator("__ge__", _promote_dtypes_decorator( + gen_math_ops.greater_equal)) +tensor_lib.Tensor._override_operator("__eq__", _tensor_equals_factory) +tensor_lib.Tensor._override_operator("__ne__", _tensor_not_equals_factory) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..56e352a63c4207c602eeba84ef4c316a4bca7b06 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py @@ -0,0 +1,292 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Methods for rewriting while_v2 grad functions with IndexedSlices output.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_resource_variable_ops +from tensorflow.python.util import nest + + +def rewrite_grad_indexed_slices(grads, body_grad_graph, loop_vars, + forward_inputs): + """Handles special case of IndexedSlices returned from while gradient. + + Some gradient functions return IndexedSlices instead of a Tensor (e.g. the + gradient of Gather ops). When this happens in the gradient of a while body, + the resulting gradient body function will have mismatched inputs and outputs, + since the input is a single Tensor, but the IndexedSlices gets unnested into + three output Tensors. + + This function fixes this by rewriting the gradient body to have three inputs + to match the three outputs, i.e., it effectively converts the input Tensor + into an input IndexedSlices. It also returns new `loop_vars` to reflect the + new inputs. + + Args: + grads: the input gradient Tensors to the while gradient computation. + body_grad_graph: _WhileBodyGradFuncGraph. + loop_vars: list of Tensors. The inputs to body_grad_graph. + forward_inputs: list of Tensors. The (flat) inputs to the forward-pass While + op. + + Returns: + The new loop_vars to pass to body_grad_graph. + """ + # Match up body_grad_graph.structured_outputs with the corresponding + # forward_inputs. + # + # Note that we don't expect a gradient computation to have structured output + # (e.g. no nested lists), so no need to flatten + # body_grad_graph.structured_outputs. However, structured_outputs may still + # contain composite tensors such as IndexedSlices, unlike + # body_grad_graph.outputs, which contains flattened composite tensors. + inputs_with_grads = [ + t for g, t in zip(grads, forward_inputs) if g is not None + ] + # Skip loop counter, maximum_iterations and total number of loop iterations. + structured_outputs = body_grad_graph.structured_outputs[3:] + + for forward_input, output in zip(inputs_with_grads, structured_outputs): + if not isinstance(output, indexed_slices.IndexedSlices): + continue + + if forward_input.dtype == dtypes.resource: + # TODO(skyewm): In theory we should use this for all captured inputs, not + # just resource handles (which can only be captured). We can do this by + # checking that forward_input is passed straight through to its output. 
+ loop_vars = _rewrite_input_as_indexed_slices(body_grad_graph, output, + forward_input, loop_vars) + else: + _rewrite_output_as_tensor(body_grad_graph, output) + + return loop_vars + + +def _get_tensor_index_in_iterable(iterable, t): + """Returns index of first occurence of `t`, raises ValueError if not found.""" + for i, elem in enumerate(iterable): + if t is elem: + return i + raise ValueError(f"Element `{t!r}` is not found in iterable `{iterable!r}`.") + + +def _rewrite_output_as_tensor(body_grad_graph, grad_output_slices): + """Rewrites grad_output_slices to be a Tensor output. + + Args: + body_grad_graph: _WhileBodyGradFuncGraph. + grad_output_slices: IndexedSlices output of body_grad_graph. + """ + with body_grad_graph.as_default(): + new_output = tensor_conversion.convert_to_tensor_v2(grad_output_slices) + + idx = _get_tensor_index_in_iterable(body_grad_graph.structured_outputs, + grad_output_slices) + body_grad_graph.structured_outputs[idx] = new_output + body_grad_graph.outputs = func_graph.flatten( + body_grad_graph.structured_outputs) + + +def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices, + forward_input, loop_vars): + """Rewrites grad_output_slices's corresponding input to be an IndexedSlices. + + This rewrite requires that forward_input was captured in the forward loop, + i.e. is not a user-specified loop variable. This is important because the + rewrite assumes that forward_input is passed through to its corresponding + output unchanged. This assumption is used in _rewrite_input_as_indexed_slices, + which depends on the exact gradient structure produced by the input's fanout. + + This can yield a more efficient computation than using + _rewrite_output_as_tensor, since it preserves the IndexedSlices structure + instead of converting the IndexedSlices to a dense Tensor. + + Args: + body_grad_graph: _WhileBodyGradFuncGraph. + grad_output_slices: IndexedSlices output of body_grad_graph. + forward_input: the corresponding Tensor input to the forward loop. + loop_vars: list of Tensors. The inputs to body_grad_graph. + + Returns: + The new loop_vars to pass to body_grad_graph. + """ + # Create initial IndexedSlices that will be the input to the grad While + # op. This will start as zeros, and accumulate the IndexedSlices grad output. + # Note that because forward_input is captured and not a loop var, its incoming + # gradient should always be zero. + init_slices = _create_grad_indexed_slices_init(grad_output_slices, + forward_input) + + # Create a new version of grad_output_slices's gradient computation that uses + # the new IndexedSlices input instead of the original Tensor input. We'll + # return the new computation and leave the old computation as dead code. + # TODO(skyewm): considering pruning body_grad_graph to remove the old + # computation. + with body_grad_graph.as_default(): + input_slices = indexed_slices.IndexedSlices( + values=body_grad_graph.capture(init_slices.values, allowlisted=True), + indices=body_grad_graph.capture(init_slices.indices, allowlisted=True), + dense_shape=body_grad_graph.capture( + init_slices.dense_shape, allowlisted=True)) + + # Remove the captured tensors from the function inputs. We'll add them back + # at the correct index in _update_indexed_slices_param. 
+ for t in _flatten(init_slices): + captured_t = body_grad_graph.captures.pop(t) + body_grad_graph.inputs.remove(captured_t) + + new_output_slices = _rewrite_grad_indexed_slices_output( + grad_output_slices, input_slices) + + # Update body_grad_graph's inputs and outputs to reflect the new + # IndexedSlices computation. + return _update_indexed_slices_param(body_grad_graph, loop_vars, init_slices, + input_slices, new_output_slices, + grad_output_slices) + + +def _create_grad_indexed_slices_init(grad_output_slices, forward_input): + """Creates an IndexedSlices to pass as input to the while grad function. + + Args: + grad_output_slices: IndexedSlices. The corresponding while grad function + output. + forward_input: Tensor. The corresponding input to the forward while op. + + Returns: + Zeros IndexedSlices, created in current Graph. + """ + assert isinstance(grad_output_slices, indexed_slices.IndexedSlices) + assert isinstance(forward_input, tensor.Tensor) + values_out = grad_output_slices.values + indices_out = grad_output_slices.indices + + # Create the initial values tensor. + if values_out.shape.is_fully_defined(): + values_shape = tensor_shape.TensorShape([0] + + values_out.shape.as_list()[1:]) + values = array_ops.zeros( + values_shape, dtype=values_out.dtype, name="values_init") + else: + if forward_input.dtype == dtypes.resource: + forward_shape = gen_resource_variable_ops.variable_shape(forward_input) + else: + forward_shape = array_ops.shape(forward_input) + values_shape = array_ops.concat([[0], forward_shape[1:]], 0) + values = array_ops.zeros( + values_shape, dtype=values_out.dtype, name="values_init") + + # Create the initial indices tensor. + indices = constant_op.constant([], indices_out.dtype, name="indices_init") + + # Create the initial dense_shape tensor. We assume is the same shape as + # forward_input, since captured tensors don't change shape across loop + # iterations. + if forward_input.dtype == dtypes.resource: + shape = gen_resource_variable_ops.variable_shape( + forward_input, name="shape_init") + else: + shape = array_ops.shape(forward_input, name="shape_init") + + return indexed_slices.IndexedSlices( + values=values, indices=indices, dense_shape=shape) + + +def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices): + """Creates a new version of old_output_slices with new_input_slices as input. + + This method assumes that old_output_slices.{values,indices} are produced by + concatenating the incoming gradient Tensor input with the IndexedSlices + produced by the gradient computation of the while body. See + backprop.aggregate_indexed_slices_gradients for where these concats are + constructed. We build new concats that use new_input_slices instead of the + original Tensor input. + + Args: + old_output_slices: original IndexedSlices output of while gradient. + new_input_slices: new IndexedSlices to use as input to while gradient. + + Returns: + A new IndexedSlices to replace old_output_slices. + """ + + def rewrite(old_output, new_input): + assert old_output.type == "Identity" + concat_op = old_output.inputs[0].op + assert concat_op.type == "ConcatV2" + # Don't include axis arg + old_concat_args = concat_op.inputs[:-1] + # We assume that the original gradient input was the first argument to the + # concat op. + # TODO(skyewm): do this in a more robust way. 
+ return array_ops.concat([new_input] + old_concat_args[1:], 0) + + values = rewrite(old_output_slices.values.op, new_input_slices.values) + indices = rewrite(old_output_slices.indices.op, new_input_slices.indices) + return indexed_slices.IndexedSlices( + values=values, indices=indices, dense_shape=new_input_slices.dense_shape) + + +def _update_indexed_slices_param(graph, loop_vars, init_slices, input_slices, + output_slices, old_output_slices): + """Updates graph with new IndexedSlices input/output. + + Updates graph's metadata to output the gradient computation defined by + init_slices, input_slices, and output_slices, instead of outputting + old_output_slices. Also returns a new version of loop_vars with init_slices + replacing the old input. + + Args: + graph: _WhileBodyGradFuncGraph. + loop_vars: the inputs to graph. + init_slices: the new IndexedSlices to use as input to graph. + input_slices: the new IndexedSlices in graph that should be fed by + init_slices. + output_slices: the new IndexedSlices in graph that should be the + corresponding output to input_slices. + old_output_slices: the IndexedSlices in graph that are currently being + output. + + Returns: + New loop_vars to pass to graph. + """ + structured_idx = _get_tensor_index_in_iterable(graph.structured_outputs, + old_output_slices) + # We assume that the component tensors of old_output_slices appear + # sequentially in graph.outputs. We use the first of these tensors + # as the reference index. + flat_idx = _get_tensor_index_in_iterable( + graph.outputs, + func_graph.flatten(old_output_slices)[0]) + + graph.structured_outputs[structured_idx] = output_slices + graph.outputs = func_graph.flatten(graph.structured_outputs) + + graph.inputs = ( + graph.inputs[:flat_idx] + _flatten(input_slices) + + graph.inputs[flat_idx + 1:]) + + return loop_vars[:flat_idx] + _flatten(init_slices) + loop_vars[flat_idx + 1:] + + +def _flatten(arg): + return nest.flatten(arg, expand_composites=True)
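The rewriter above leans on the fact that `nest.flatten(..., expand_composites=True)` decomposes an `IndexedSlices` into its `values`, `indices`, and `dense_shape` tensors, which is what `_flatten` returns and what gets spliced into the gradient graph's inputs and outputs. A minimal sketch of that behaviour using the public TF API (the shapes below are invented purely for illustration):

```
import tensorflow as tf

# An empty IndexedSlices, analogous to what _create_grad_indexed_slices_init
# builds: zero accumulated rows, an empty index vector, and the dense shape
# of the forward input.
slices = tf.IndexedSlices(
    values=tf.zeros([0, 4]),
    indices=tf.constant([], dtype=tf.int32),
    dense_shape=tf.constant([10, 4]))

# expand_composites=True splits the composite tensor into its components,
# in the order (values, indices, dense_shape).
flat = tf.nest.flatten(slices, expand_composites=True)
print([t.shape.as_list() for t in flat])  # [[0, 4], [0], [2]]
```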