diff --git a/.gitattributes b/.gitattributes index dc17c9216f19000535c2cd1ec46b273e984426e3..f363868f0a8e68e840a5e42d4d511bbb6afb2546 100644 --- a/.gitattributes +++ b/.gitattributes @@ -824,3 +824,5 @@ pllava/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs d videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_dtypes.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_python_memory_checker_helper.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/n/nansi.sysk b/llava_next/share/terminfo/n/nansi.sysk new file mode 100644 index 0000000000000000000000000000000000000000..9c45d353217571da44c7c1ebf92e1f56174821c7 Binary files /dev/null and b/llava_next/share/terminfo/n/nansi.sysk differ diff --git a/llava_next/share/terminfo/n/ncr160vt100wpp b/llava_next/share/terminfo/n/ncr160vt100wpp new file mode 100644 index 0000000000000000000000000000000000000000..1b21d07a8809a4b2069ff82035372e20b7c08838 Binary files /dev/null and b/llava_next/share/terminfo/n/ncr160vt100wpp differ diff --git a/llava_next/share/terminfo/n/ncr160vt200pp b/llava_next/share/terminfo/n/ncr160vt200pp new file mode 100644 index 0000000000000000000000000000000000000000..85d0af33c105a80a75a89cbc8986dd81e98f4bc2 Binary files /dev/null and b/llava_next/share/terminfo/n/ncr160vt200pp differ diff --git a/llava_next/share/terminfo/n/ncr260intan b/llava_next/share/terminfo/n/ncr260intan new file mode 100644 index 0000000000000000000000000000000000000000..2def18a5df6ba0740a83c400b131d56a291b989f Binary files /dev/null and 
b/llava_next/share/terminfo/n/ncr260intan differ diff --git a/llava_next/share/terminfo/n/ncr260vt100wpp b/llava_next/share/terminfo/n/ncr260vt100wpp new file mode 100644 index 0000000000000000000000000000000000000000..9628e2b5df9e506e5b7029f2af954db6bbe20704 Binary files /dev/null and b/llava_next/share/terminfo/n/ncr260vt100wpp differ diff --git a/llava_next/share/terminfo/n/ncr260vt300an b/llava_next/share/terminfo/n/ncr260vt300an new file mode 100644 index 0000000000000000000000000000000000000000..eb59796b5f1a846ed69b800e27648f1a9b6e9ee7 Binary files /dev/null and b/llava_next/share/terminfo/n/ncr260vt300an differ diff --git a/llava_next/share/terminfo/n/ncr260wy50+pp b/llava_next/share/terminfo/n/ncr260wy50+pp new file mode 100644 index 0000000000000000000000000000000000000000..cbb06eccc02e5e7e9f0d8497ea029ebf752d062c Binary files /dev/null and b/llava_next/share/terminfo/n/ncr260wy50+pp differ diff --git a/llava_next/share/terminfo/n/ncrvt100an b/llava_next/share/terminfo/n/ncrvt100an new file mode 100644 index 0000000000000000000000000000000000000000..f97322b968243eeb6e6640aa5f96420a7b4a6058 Binary files /dev/null and b/llava_next/share/terminfo/n/ncrvt100an differ diff --git a/llava_next/share/terminfo/n/ncrvt100wpp b/llava_next/share/terminfo/n/ncrvt100wpp new file mode 100644 index 0000000000000000000000000000000000000000..d5cdb64003254e9fc7ddca5352e6587e38b56e3f Binary files /dev/null and b/llava_next/share/terminfo/n/ncrvt100wpp differ diff --git a/llava_next/share/terminfo/n/ncsa-m b/llava_next/share/terminfo/n/ncsa-m new file mode 100644 index 0000000000000000000000000000000000000000..99bcc31bb767d2ea2914e3675ceb9d3a63e65dfe Binary files /dev/null and b/llava_next/share/terminfo/n/ncsa-m differ diff --git a/llava_next/share/terminfo/n/ncsa-vt220 b/llava_next/share/terminfo/n/ncsa-vt220 new file mode 100644 index 0000000000000000000000000000000000000000..0989fa4f7add726346a1db269d3ae5830ae22732 Binary files /dev/null and 
b/llava_next/share/terminfo/n/ncsa-vt220 differ diff --git a/llava_next/share/terminfo/n/ndr9500-25-nl b/llava_next/share/terminfo/n/ndr9500-25-nl new file mode 100644 index 0000000000000000000000000000000000000000..99e669a570ad6c2dccbb360bc29519bbb0054d78 Binary files /dev/null and b/llava_next/share/terminfo/n/ndr9500-25-nl differ diff --git a/llava_next/share/terminfo/n/netbsd6 b/llava_next/share/terminfo/n/netbsd6 new file mode 100644 index 0000000000000000000000000000000000000000..17121429d442466c9814ac4d8fa9a7a4082d20c0 Binary files /dev/null and b/llava_next/share/terminfo/n/netbsd6 differ diff --git a/llava_next/share/terminfo/n/news-42-euc b/llava_next/share/terminfo/n/news-42-euc new file mode 100644 index 0000000000000000000000000000000000000000..d12b4eef7f27336e493ab6e05f04be126300625e Binary files /dev/null and b/llava_next/share/terminfo/n/news-42-euc differ diff --git a/llava_next/share/terminfo/n/news29 b/llava_next/share/terminfo/n/news29 new file mode 100644 index 0000000000000000000000000000000000000000..8f23324c872037090586950aee90b05774b7d870 Binary files /dev/null and b/llava_next/share/terminfo/n/news29 differ diff --git a/llava_next/share/terminfo/n/nsterm-256color b/llava_next/share/terminfo/n/nsterm-256color new file mode 100644 index 0000000000000000000000000000000000000000..f10555ae5ef425315e1951b34c9f18e1666276d1 Binary files /dev/null and b/llava_next/share/terminfo/n/nsterm-256color differ diff --git a/llava_next/share/terminfo/n/nsterm-7-s b/llava_next/share/terminfo/n/nsterm-7-s new file mode 100644 index 0000000000000000000000000000000000000000..d924966d315cc1816c2bf713ee935e1348430c02 Binary files /dev/null and b/llava_next/share/terminfo/n/nsterm-7-s differ diff --git a/llava_next/share/terminfo/n/nsterm-acs b/llava_next/share/terminfo/n/nsterm-acs new file mode 100644 index 0000000000000000000000000000000000000000..444a0e0858a8436c875d1da34dcc2139104a22a6 Binary files /dev/null and b/llava_next/share/terminfo/n/nsterm-acs differ 
diff --git a/llava_next/share/terminfo/n/nsterm-acs-m b/llava_next/share/terminfo/n/nsterm-acs-m new file mode 100644 index 0000000000000000000000000000000000000000..2e8b3c8299dc20db92507262477342bbacf0c9cb Binary files /dev/null and b/llava_next/share/terminfo/n/nsterm-acs-m differ diff --git a/llava_next/share/terminfo/n/nsterm-c-s-7 b/llava_next/share/terminfo/n/nsterm-c-s-7 new file mode 100644 index 0000000000000000000000000000000000000000..3bee1b4ad65181eea01bf1e63a69defc2ff6a8e0 Binary files /dev/null and b/llava_next/share/terminfo/n/nsterm-c-s-7 differ diff --git a/llava_next/share/terminfo/n/ntconsole-35 b/llava_next/share/terminfo/n/ntconsole-35 new file mode 100644 index 0000000000000000000000000000000000000000..2332b3ab8f198b4e300eee78fd0b6bf7794b8d9a Binary files /dev/null and b/llava_next/share/terminfo/n/ntconsole-35 differ diff --git a/llava_next/share/terminfo/n/ntconsole-35-nti b/llava_next/share/terminfo/n/ntconsole-35-nti new file mode 100644 index 0000000000000000000000000000000000000000..1544b68f31cd46607720c4af53c481c52013e2be Binary files /dev/null and b/llava_next/share/terminfo/n/ntconsole-35-nti differ diff --git a/llava_next/share/terminfo/n/ntconsole-50-nti b/llava_next/share/terminfo/n/ntconsole-50-nti new file mode 100644 index 0000000000000000000000000000000000000000..b9f8ed7580466dfaec31f0c785c0c3277b7c8a3d Binary files /dev/null and b/llava_next/share/terminfo/n/ntconsole-50-nti differ diff --git a/llava_next/share/terminfo/n/nwe501 b/llava_next/share/terminfo/n/nwe501 new file mode 100644 index 0000000000000000000000000000000000000000..5eb4ff3878cb36a95a26570cb0bb99e5daab3123 Binary files /dev/null and b/llava_next/share/terminfo/n/nwe501 differ diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/__init__.py b/parrot/lib/python3.10/site-packages/torch/_custom_op/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a7b2a8dbd8b0e1a33d0a71c7b6c882fd8cc75c0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41bb3d085367bff9a62e6063c560865d01963c3b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c971ac3fd150b711d8492182f9022a46e13feb72 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beeb7fa1ac0da9734a554ba49394eaeb7e3045d0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/autograd.py b/parrot/lib/python3.10/site-packages/torch/_custom_op/autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..35727197d03c1c4c1e00584d2c25e1830d6bcbd8 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/_custom_op/autograd.py @@ -0,0 +1,275 @@ +# mypy: allow-untyped-defs +import torch +import torch.utils._pytree as pytree +from collections import namedtuple +import functools + + +# NOTE [CustomOp autograd kernel indirection] +# We register `inner` as the autograd kernel for this custom_op. +# `inner` either calls the autograd formula registered by the user, +# or goes into an `autograd_not_implemented` kernel. +# +# The reason why this indirection exists is +# so that we can swap out the autograd kernel (the PyTorch dispatcher +# doesn't actually allow us to do this). By default, we want +# the `autograd_not_implemented` behavior, but then the user may come +# and register something that is actually a backward formula +def autograd_kernel_indirection(custom_op): + autograd_fallback = autograd_not_implemented(custom_op) + + def inner(*args, **kwargs): + if custom_op._has_impl('autograd'): + kernel = custom_op._get_impl('autograd').func + return kernel(*args, **kwargs) + # As explained in NOTE ["backward", "save_for_backward", and "autograd"], + # after the user gives us "backward" and "save_for_backward", we generate + # the "autograd" impl. If the user only provided one, then we tell + # the user they've done something wrong. + if custom_op._has_impl('save_for_backward') or custom_op._has_impl('backward'): + missing = ( + 'save_for_backward' if custom_op._has_impl('backward') + else 'backward' + ) + found = 'save_for_backward' if missing == 'backward' else 'backward' + loc = custom_op._get_impl(found).location + raise RuntimeError( + f"We found a '{found}' registration for {custom_op} at " + f"{loc} but were unable to find a '{missing}' registration. 
" + f"To use the CustomOp API to register a backward formula, " + f"please provide us both a backward function and a " + f"'save for backward' function via `impl_backward` and " + f"`impl_save_for_backward` respectively.") + return autograd_fallback(*args, **kwargs) + return inner + + +# TODO(#101191): Use the actual C++ autograd not implemented fallback, +# or change the default autograd fallback to the autograd not implemented fallback. +def autograd_not_implemented(custom_op): + def kernel(*args, **kwargs): + if torch.is_grad_enabled() and pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) and x.requires_grad, (args, kwargs) + ): + raise RuntimeError("Autograd has not been implemented for operator") + with torch._C._AutoDispatchBelowAutograd(): + return custom_op(*args, **kwargs) + return kernel + + +def mark_non_differentiable(ctx, output, output_differentiability): + # Output types are restricted to be: + # - Tensor + # - Tensor[] + # - int, bool, Scalar, float + # See _check_can_register_backward + if output_differentiability is not None: + if not isinstance(output, tuple): + tuple_output = (output,) + else: + tuple_output = output # type: ignore[assignment] + assert len(output_differentiability) == len(tuple_output) + non_differentiable_tensors = [] + for idx, (differentiable, out) in enumerate(zip(output_differentiability, tuple_output)): + if isinstance(out, torch.Tensor): + if not differentiable: + non_differentiable_tensors.append(out) + continue + if isinstance(out, list): + if not differentiable: + non_differentiable_tensors.extend(out) + continue + if differentiable: + raise RuntimeError( + f"With output_differentiability={output_differentiability}. 
" + f"At idx {idx}, we received an object of type {type(out)} that " + f"is not a Tensor, so it cannot have be marked as differentiable in " + f"output_differentiability.") + if non_differentiable_tensors: + ctx.mark_non_differentiable(*non_differentiable_tensors) + + +def construct_autograd_kernel( + schema, + output_differentiability, + custom_op, + op_overload, + save_for_backward_fn, + backward_fn): + + def apply(*args): + flat_args, spec = pytree.tree_flatten(args) + out_spec = None + + def forward(ctx, *flat_args): + ctx.set_materialize_grads(True) + args = pytree.tree_unflatten(list(flat_args), spec) + with torch._C._AutoDispatchBelowAutograd(): + output = op_overload(*args) + + # We use the info about args to give better error messages in backward + args_info = namedtuple_args( + schema, pytree.tree_map(type, args)) + + save_for_backward_fn_inputs = namedtuple_args(schema, args) + to_save = save_for_backward_fn(save_for_backward_fn_inputs, output) + + save_pytree_for_backward(ctx, (to_save, args_info)) + mark_non_differentiable(ctx, output, output_differentiability) + + nonlocal out_spec + flat_output, out_spec = pytree.tree_flatten(output) + return tuple(flat_output) + + def backward(ctx, *flat_grad_output): + assert out_spec is not None + grads = pytree.tree_unflatten(list(flat_grad_output), out_spec) + saved, args_info = unpack_saved(ctx) + # There is nothing on the ctx object for now, it is just there so + # that we can add additional things in the future. + inner_ctx = object() + if not isinstance(grads, tuple): + grads = (grads,) + grad_inputs_dict = backward_fn(inner_ctx, saved, *grads) + + # Massage the grad_inputs_dict to a form acceptable by + # autograd.Function. 
+ validate_grad_inputs_dict(grad_inputs_dict, custom_op, args_info) + return grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info) + + generated_cls = gen_autograd_function( + custom_op._opname + '_customop', forward, backward) + + flat_output = generated_cls.apply(*flat_args) + assert out_spec is not None + return pytree.tree_unflatten(list(flat_output), out_spec) + return apply + + +def gen_autograd_function(name, forward, backward): + generated_cls = type( + name, + (torch.autograd.Function,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + } + ) + return generated_cls + + +@functools.lru_cache +def namedtuple_args_cls(schema): + attribs = [arg.name for arg in schema.arguments.flat_all] + name = str(schema.name) + "_args" + # mypy doesn't support dynamic namedtuple name + tuple_cls = namedtuple(name, attribs) # type: ignore[misc] + return tuple_cls + + +def namedtuple_args(schema, args): + assert isinstance(args, tuple) + tuple_cls = namedtuple_args_cls(schema) + return tuple_cls(*args) + + +def validate_grad_inputs_dict(grad_inputs_dict, forward_op, args_info): + def error(what): + backward = forward_op._get_impl('backward') + raise RuntimeError( + f"In the backward function defined for {forward_op} at " + f"{backward.location} using the CustomOp API, {what}") + + if not isinstance(grad_inputs_dict, dict): + error(f"expected the output of the backward function to be a dict but " + f"got {type(grad_inputs_dict)}") + + expected_keys = {arg.name for arg in forward_op._schema.arguments.flat_all + if arg.type.is_tensor_like()} + actual_keys = grad_inputs_dict.keys() + if expected_keys != actual_keys: + error(f"expected the returned grad_input dict to have keys " + f"{expected_keys} but got {actual_keys}. The backward " + f"function must return a gradient (can be None) for each arg " + f"to the CustomOp that may be a Tensor or Sequence[Tensor]. 
" + f"Args declared to be non-Tensor-like types should not appear " + f"in the grad_input dict") + + for name, grad in grad_inputs_dict.items(): + arg_info = getattr(args_info, name) + + if isinstance(arg_info, list): + if not isinstance(grad, (tuple, list)): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of gradients but got object of type " + f"{type(grad)}.") + if not len(grad) == len(arg_info): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of {len(arg_info)} gradients but got " + f"{len(grad)}") + for idx, (g, info) in enumerate(zip(grad, arg_info)): + if g is None: + continue + if not isinstance(g, torch.Tensor): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of None or Tensor gradients but got " + f"object of {type(g)} at index {idx}") + if not issubclass(info, torch.Tensor): + error(f"for input '{name}', got a Tensor as the gradient " + f"for the {idx}-th value but expected None because " + f"the {idx}-th value was not a Tensor (it was " + f"type {arg_info}") + continue + + if grad is None: + continue + if not isinstance(grad, torch.Tensor): + error(f"got object of type {type(grad)} as the gradient for input " + f"'{name}', " + f"but expected the gradient to be either None or a Tensor") + if not issubclass(arg_info, torch.Tensor): + error(f"got a Tensor as the gradient for input '{name}' but " + f"expected None as the gradient because input '{name}' " + f"was not a Tensor (it was type {arg_info}).") + + +def grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info): + result = [] + for name, arg_info in args_info._asdict().items(): + if name not in grad_inputs_dict: + result.append(pytree.tree_map(lambda x: None, arg_info)) + continue + result.append(grad_inputs_dict[name]) + return tuple(pytree.tree_leaves(result)) + +# Saves "stuff" (a pytree) onto the ctx object. Use unpack_saved to unpack it. 
+# autograd.Function prefers that users use ctx.save_for_backward to +# save Tensors (to avoid reference cycles) and for non-Tensors to go onto the +# ctx object. +def save_pytree_for_backward(ctx, stuff): + flat_stuff, spec = pytree.tree_flatten(stuff) + num_elts = len(flat_stuff) + tensor_idxs = [idx for idx, thing in enumerate(flat_stuff) + if isinstance(thing, torch.Tensor)] + non_tensor_idxs = [idx for idx, thing in enumerate(flat_stuff) + if not isinstance(thing, torch.Tensor)] + tensors = [thing for thing in flat_stuff if isinstance(thing, torch.Tensor)] + non_tensors = [thing for thing in flat_stuff if not isinstance(thing, torch.Tensor)] + + ctx.spec = spec + ctx.num_elts = num_elts + ctx.save_for_backward(*tensors) + ctx.tensor_idxs = tensor_idxs + ctx.saved_non_tensors = non_tensors + ctx.non_tensor_idxs = non_tensor_idxs + + +# Inverse operation to save_pytree_for_backward +def unpack_saved(ctx): + flat_stuff = [None] * ctx.num_elts + for tensor, idx in zip(ctx.saved_tensors, ctx.tensor_idxs): + flat_stuff[idx] = tensor + for non_tensor, idx in zip(ctx.saved_non_tensors, ctx.non_tensor_idxs): + flat_stuff[idx] = non_tensor + stuff = pytree.tree_unflatten(flat_stuff, ctx.spec) + return stuff diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/functional.py b/parrot/lib/python3.10/site-packages/torch/_custom_op/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..57ff351e2e2d53a217008e793c57b1e3867ebe54 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_custom_op/functional.py @@ -0,0 +1,188 @@ +# mypy: allow-untyped-defs +import weakref + +import torch +import torch.utils._pytree as pytree +from torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet +from torch._ops import OpOverload +from torch.library import Library +from torchgen.model import ( + BaseTy, + BaseType, + FunctionSchema, + OperatorName, + OptionalType, + SchemaKind, +) + +from .autograd import autograd_not_implemented 
+ + +def register_functional_op( + lib: Library, + new_op_name: str, + mutable_op: OpOverload, +) -> None: + """Given a mutable operator, registers the functional variant. + + This API also correctly links the functional variant with the mutable + operator for the purposes of functionalization. + + All of the new registrations are performed on the ``lib`` passed in. + + Arguments: + lib (Library): Should be a torch.library.Library object that has + the same namespace as ``mutable_op``'s namespace. + lib will be used to register the new functional op as well + as a functionalization kernel for the ``mutable_op`` + If you don't have a library handy, use + ``torch.library.Library(ns, 'FRAGMENT')`` to construct one. + new_op_name (str): The name of the functional operator (without the + namespace). If no namespace, the new functional variant will be + accessible under ``torch.ops.{lib.ns}.new_op_name``. + mutable_op (OpOverload): The mutable custom operator. Note + that you may need to add a `.default` to it, like + `torch.ops.aten.abs_.default`. + + """ + validate(mutable_op) + schema = functional_schema(new_op_name, mutable_op) + lib.define(schema) + + functional_impl = construct_functional_impl(mutable_op) + lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd') + + functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default + + # There's no easy way for us to generate the autograd kernel, so we + # use autograd_not_implemented. Also, this makes it so that the user + # is unable to register an autograd formula themselves. This shouldn't + # be a problem if the user doesn't use the functional op direclty + # in their program, but we may need to revist this in the future. 
+ lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd') + + f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op) + + lib.impl(mutable_op, f_kernel, 'Functionalize') + + +def construct_functional_impl(mutable_op): + def functional_impl(*args): + # Strategy: + # - clone args that would have been mutated + # - run mutable_op + # - return the cloned args as additional outputs + new_args = [] + extra_rets = [] + for is_write, arg in zip(mutable_args(mutable_op), args): + if is_write: + cloned = arg.clone() if arg is not None else None + new_args.append(cloned) + extra_rets.append(cloned) + else: + new_args.append(arg) + result = mutable_op(*new_args) + if result is None: + return tuple(extra_rets) + if isinstance(result, tuple): + return (*result, *extra_rets) + return (result, *extra_rets) + return functional_impl + + +def construct_functionalization_kernel(mutable_op, functional_op): + def kernel(*args): + # There's nothing to be functionalized! + # We can still end up here because DispatchKey::Functionalize is a mode key + if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args): + with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)): + return mutable_op(*args) + + # NB: This differs from the codegen -- codegen handles cases where there + # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper. + # This only really matters for XLA (mixed CPU-XLA tensors) and + # running functionalization without the PT2 stack (which guarantees to us that + # all tensors are FunctionalTensorWrapper). 
+ if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args): + raise RuntimeError("{mutable_op}: expected all args to be FunctionalTensorWrapper") + + unwrapped_args = [] + for arg in args: + if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg): + torch._sync(arg) + unwrapped = torch._from_functional_tensor(arg) + unwrapped_args.append(unwrapped) + else: + unwrapped_args.append(arg) + + with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)): + output = functional_op(*unwrapped_args) + + num_actual_output = len(mutable_op._schema.returns) + actual_output = pytree.tree_map( + torch._to_functional_tensor, output[:num_actual_output]) + + new_values_to_propagate = output[num_actual_output:] + inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args) + if is_write] + assert len(new_values_to_propagate) == len(inputs_to_replace) + for new_value, arg in zip(new_values_to_propagate, inputs_to_replace): + if (arg is None and new_value is None) or (arg is not None and new_value is not None): + continue + torch._C._propagate_xla_data(arg, new_value) + torch._C._replace_(arg, new_value) + torch._C._commit_update(arg) + torch._sync(arg) + + if len(actual_output) == 1: + return actual_output[0] + elif len(actual_output) == 0: + return None + return actual_output + + return kernel + + +def validate(mutable_op: OpOverload): + if not isinstance(mutable_op, OpOverload): + raise TypeError( + f"register_functional_op(mutable_op): expected mutable_op to be instance of " + f"OpOverload but got {type(mutable_op)}") + + # There are generally three types of "in-place" or "mutable" ops. 
+ # Each of them have their own conventions: + # - inplace (first input modified in-place and returned as only output) + # - out= (some args modified in-place and returned as outputs) + # - mutable (some args modified in-place but none of those returned as outputs) + # In theory we can support all three, but we'll just support the last + # option right now for simplicity. + schema = FunctionSchema.parse(str(mutable_op._schema)) + if not schema.kind() == SchemaKind.mutable: + raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)") + for ret in schema.returns: + # construct_functionalization_kernel assumes this for simplicity + if ret.annotation is not None: + raise NotImplementedError( + "NYI: register_functional_op(op) where op returns a mutated or aliased value. " + "Please file an issue (and as a workaround, modify your operator to " + "not return the mutated value or aliases)") + for arg in schema.arguments.flat_all: + # construct_functionalization_kernel assumes this for simplicity + if arg.type.is_tensor_like() and ( + arg.type != BaseType(BaseTy.Tensor) + and arg.type != OptionalType(BaseType(BaseTy.Tensor)) + ): + raise NotImplementedError( + "NYI: register_functional_op(op) where op has a List[Tensor] input." 
+ "Please file an issue.") + + +def functional_schema(new_op_name, op: OpOverload): + schema = FunctionSchema.parse(str(op._schema)) + schema = schema.signature().with_name(OperatorName.parse(new_op_name)) + return str(schema) + + +def mutable_args(op: OpOverload): + return tuple(False if arg.alias_info is None else arg.alias_info.is_write + for arg in op._schema.arguments) diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_op/impl.py b/parrot/lib/python3.10/site-packages/torch/_custom_op/impl.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3efce60a8108c70ffb9d0ec5efca926126e6a8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_custom_op/impl.py @@ -0,0 +1,873 @@ +# mypy: allow-untyped-defs +import dataclasses +import functools +import inspect +import sys +import typing +import weakref + +from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy + +import torch +import torch._C as _C +import torch.library as library +from torch._library.abstract_impl import AbstractImplCtx +from torch.library import get_ctx + +from .autograd import autograd_kernel_indirection, construct_autograd_kernel +import torch._library.infer_schema +from torch._library.infer_schema import infer_schema + +""" +For a detailed guide on custom ops, please see +https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + +This file includes pieces of the implementation of our custom operator API. +""" + +__all__ = ["custom_op", "CustomOp", "get_ctx", "AbstractImplCtx"] + + +SUPPORTED_DEVICE_TYPE_TO_KEY = { + "cpu": "CPU", + "cuda": "CUDA", +} + +# We will not let users register CustomOps with anything that could look like +# PyTorch internals to avoid confusion. +RESERVED_NS = { + "prim", + "prims", + "aten", + "at", + "torch", + "pytorch", +} + + +def custom_op( + qualname: str, manual_schema: typing.Optional[str] = None +) -> typing.Callable: + r"""Creates a new CustomOp object. 
+ + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + In PyTorch, defining an op (short for "operator") is a two step-process: + - we need to define (create) the op + - we need to implement behavior for how the operator interacts with + various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc. + + This entrypoint defines the CustomOp object (the first step); + you must then perform the second step by calling various methods on + the CustomOp object. + + This API is used as a decorator (see examples). + + Arguments: + qualname (str): Should be a string that looks like + "namespace::operator_name". Operators in PyTorch need a namespace to + avoid name collisions; a given operator may only be created once. + If you are writing a Python library, we recommend the namespace to + be the name of your top-level module. The operator_name must be + the same as the name of the function you pass to custom_op + (see examples). + manual_schema (Optional[str]): Each PyTorch operator needs a schema that + tells PyTorch the types of the inputs/outputs. If None (default), + we will infer the schema from the type annotations on the function + (see examples). Otherwise, if you don't want to use type annotations, + you may provide us the schema string. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Step 1: define the CustomOp. + >>> # We need to provide the decorator a "prototype function" + >>> # (a function with Python ellipses as the body). + >>> @custom_op("my_library::numpy_sin") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> ... 
+ >>> + >>> # numpy_sin is now an instance of class CustomOp + >>> print(type(numpy_sin)) + >>> + >>> # Step 2: Register an implementation for various PyTorch subsystems + >>> + >>> # Register an implementation for CPU tensors + >>> @numpy_sin.impl('cpu') + >>> def numpy_sin_impl_cpu(x): + >>> return torch.from_numpy(np.sin(x.numpy())) + >>> + >>> # Register an implementation for CUDA tensors + >>> @numpy_sin.impl('cuda') + >>> def numpy_sin_impl_cuda(x): + >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> numpy_sin(x) # calls numpy_sin_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> numpy_sin(x) # calls numpy_sin_impl_cuda + + """ + + def inner(func): + if not inspect.isfunction(func): + raise ValueError( + f"custom_op(...)(func): Expected `func` to be a Python " + f"function, got: {type(func)}" + ) + + ns, name = parse_qualname(qualname) + validate_namespace(ns) + if func.__name__ != name: + raise ValueError( + f"custom_op(qualname='{qualname}', ...)(func): expected `func` " + f"to have name '{name}' but got '{func.__name__}'. 
" + f"Please either change the name of `func` or the qualname that " + f"is passed to `custom_op`" + ) + + schema = infer_schema(func) if manual_schema is None else manual_schema + schema_str = f"{name}{schema}" + function_schema = FunctionSchema.parse(schema_str) + validate_schema(function_schema) + if manual_schema is not None: + validate_function_matches_schema(function_schema, func) + + lib = library.Library(ns, "FRAGMENT") + lib.define(schema_str) + ophandle = find_ophandle_or_throw(ns, function_schema.name) + result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True) + + result.__name__ = func.__name__ + result.__module__ = func.__module__ + result.__doc__ = func.__doc__ + + library.impl(lib, result._opname, "Autograd")( + autograd_kernel_indirection(weakref.proxy(result)) + ) + + torch._C._dispatch_set_report_error_callback( + ophandle, functools.partial(report_error_callback, weakref.proxy(result)) + ) + + return result + + return inner + + +# Global dictionary holding references to all CustomOp objects +# Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime]) +# Used to query the CustomOp associated with a specific C++ dispatcher operator. +# An example usage is FakeTensor: FakeTensor checks if a specific operator +# has an implementation registered via the CustomOp API. +# Indexed by qualname (e.g. aten::foo) +global_registry: typing.Dict[str, "CustomOp"] = {} + + +class CustomOp: + r"""Class for custom operators in PyTorch. + + Use the CustomOp API to create user-defined custom operators that behave + just like regular PyTorch operators (e.g. torch.sin, torch.mm) when it + comes to various PyTorch subsystems (like torch.compile). + + To construct a `CustomOp`, use `custom_op`. + """ + + def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False): + super().__init__() + if not _private_access: + raise RuntimeError( + "The CustomOp constructor is private and we do not guarantee " + "BC for it. 
Please use custom_op(...) to create a CustomOp object" + ) + name = f"{cpp_ns}::{operator_name}" + self._schema = schema + self._cpp_ns = cpp_ns + self._lib: library.Library = lib + self._ophandle: _C._DispatchOperatorHandle = ophandle + # Has the name of the op, e.g. "foo". We cache here for convenience. + self._opname: str = operator_name + # this is _opname but with namespace. e.g. "custom::foo" + self._qualname: str = name + self.__name__ = None # mypy requires this + # NB: Some of these impls are registered as kernels to DispatchKeys. + # Modifying the _impls dict directly won't do anything in that case. + self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {} + # See NOTE [CustomOp autograd kernel indirection] + self._registered_autograd_kernel_indirection = False + + global_registry[self._qualname] = self + + def _register_autograd_kernel_indirection(self): + assert not self._registered_autograd_kernel_indirection + self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd") + self._registered_autograd_kernel_indirection = True + + # Records the impl and the source location in self._impls + # Note that this doesn't cause torch.library to use the impl, that + # needs to be done in a separate self._lib.impl call. + def _register_impl(self, kind, func, stacklevel=2): + if self._has_impl(kind): + func_and_location = self._impls[kind] + assert func_and_location is not None # Pacify mypy + location = func_and_location.location + raise RuntimeError( + f"Attempting to register a {kind} impl for operator {self._qualname} " + f"that already has a {kind} impl registered from Python at " + f"{location}. This is not supported." 
+ ) + frame = inspect.getframeinfo(sys._getframe(stacklevel)) + location = f"{frame.filename}:{frame.lineno}" + self._impls[kind] = FuncAndLocation(func, location) + + def _get_impl(self, kind): + return self._impls[kind] + + def _has_impl(self, kind): + return kind in self._impls + + def _destroy(self): + # NOTE: [CustomOp lifetime] + # A CustomOp, once created, lives forever. The mechanism is that the + # global registry holds a reference to it. However, to make testing + # easier, we want to be able to destroy CustomOp objects. + # CustomOp._destroy does the job, though it leaves the CustomOp + # in a garbage state. + del self._lib + + opnamespace = getattr(torch.ops, self._cpp_ns) + if hasattr(opnamespace, self._opname): + delattr(opnamespace, self._opname) + + del global_registry[self._qualname] + + def __repr__(self): + return f'' + + def __call__(self, *args, **kwargs): + # Bypass torch.ops.* and directly do OperatorHandle::callBoxed. + # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime + # issues from caching operators that make testing CustomOp difficult). + result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs) + return result + + def impl( + self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2, + ) -> typing.Callable: + r"""Register an implementation for a device type for this CustomOp object. + + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + If the CustomOp is passed multiple Tensor inputs with different device + types, it will dispatch to the registered implementation for the highest + priority device type among those present. + The supported device types, in order of priority, are {'cuda', 'cpu'}. + + This API is used as a decorator (see examples). 
+ + Arguments: + device_types (str or Iterable[str]): the device type(s) to register the function for. + + Examples:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> @custom_op("my_library::numpy_cos") + >>> def numpy_cos(x: Tensor) -> Tensor: + >>> ... + >>> + >>> # Register an implementation for CPU Tensors + >>> @numpy_cos.impl('cpu') + >>> def numpy_cos_impl_cpu(x): + >>> return torch.from_numpy(np.cos(x.numpy())) + >>> + >>> # Register an implementation for CUDA Tensors + >>> @numpy_cos.impl('cuda') + >>> def numpy_cos_impl_cuda(x): + >>> return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> numpy_cos(x) # calls numpy_cos_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> numpy_cos(x) # calls numpy_cos_impl_cuda + + """ + if isinstance(device_types, str): + device_types = [device_types] + for device_type in device_types: + validate_device_type(device_type) + + def inner(f): + for device_type in set(device_types): + self._check_doesnt_have_library_impl(device_type) + self._register_impl(device_type, f, stacklevel=_stacklevel) + dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type] + library.impl(self._lib, self._opname, dispatch_key)(f) + return f + + return inner + + def _check_doesnt_have_library_impl(self, device_type): + if self._has_impl(device_type): + return + key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type] + if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key): + raise RuntimeError( + f"impl(..., device_types={device_type}): the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing torch.library or TORCH_LIBRARY registration.") + + def impl_factory(self) -> typing.Callable: + r"""Register an implementation for a factory function.""" + + def inner(f): + self._register_impl("factory", f) + library.impl(self._lib, self._opname, "BackendSelect")(f) + return f + + return 
inner + + def impl_abstract(self, _stacklevel=2) -> typing.Callable: + r"""Register an abstract implementation for this operator. + + WARNING: please do not use this directly (and instead use the torch._custom_ops + APIs). Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + An "abstract implementation" specifies the behavior of this operator on + Tensors that carry no data. Given some input Tensors with certain properties + (sizes/strides/storage_offset/device), it specifies what the properties of + the output Tensors are. + + The abstract implementation has the same signature as the operator. + It is run for both FakeTensors and meta tensors. To write an abstract + implementation, assume that all Tensor inputs to the operator are + regular CPU/CUDA/Meta tensors, but they do not have storage, and + you are trying to return regular CPU/CUDA/Meta tensor(s) as output. + The abstract implementation must consist of only PyTorch operations + (and may not directly access the storage or data of any input or + intermediate Tensors). + + This API is used as a decorator (see examples). + + Examples:: + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Example 1: an operator without data-dependent output shape + >>> @custom_op('my_library::custom_linear') + >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: + >>> ... + >>> + >>> @custom_linear.impl_abstract() + >>> def custom_linear_abstract(x, weight): + >>> assert x.dim() == 2 + >>> assert weight.dim() == 2 + >>> assert bias.dim() == 1 + >>> assert x.shape[1] == weight.shape[1] + >>> assert weight.shape[0] == bias.shape[0] + >>> assert x.device == weight.device + >>> + >>> return (x @ weight.t()) + bias + >>> + >>> # Example 2: an operator with data-dependent output shape + >>> @custom_op('my_library::custom_nonzero') + >>> def custom_nonzero(x: Tensor) -> Tensor: + >>> ... 
+ >>> + >>> @custom_nonzero.impl_abstract() + >>> def custom_nonzero_abstract(x): + >>> # Number of nonzero-elements is data-dependent. + >>> # Since we cannot peek at the data in an abstract impl, + >>> # we use the ctx object to construct a new symint that + >>> # represents the data-dependent size. + >>> ctx = torch._custom_op.get_ctx() + >>> nnz = ctx.create_unbacked_symint() + >>> shape = [x.dim(), nnz] + >>> result = x.new_empty(shape, dtype=torch.long) + >>> return result + >>> + >>> @custom_nonzero.impl(['cpu', 'cuda']) + >>> def custom_nonzero_impl(x): + >>> x_np = to_numpy(x) + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> # unbacked symbolic ints in PyTorch must be >= 2, so we + >>> # constrain the range to at least 2 + >>> if res.shape[0] <= 1: + >>> raise RuntimeError("not supported") + >>> return torch.tensor(res, device=x.device) + + """ + + def inner(f): + self._check_doesnt_have_library_meta_impl() + self._register_impl("abstract", f, stacklevel=_stacklevel) + location = self._get_impl("abstract").location + + qualname = self._qualname + + # Handle DispatchKey.Meta registration + @functools.wraps(f) + def f_with_ctx(*args, **kwargs): + def error_on_ctx(): + raise RuntimeError( + f"Attempted to call get_ctx() for the meta implementation " + f"for {qualname}." + f"You have presumably called get_ctx() because the operator " + f"has a data-dependent output shape; if so, there is no " + f"such meta implementation and this error is the correct " + f"behavior. Otherwise, please remove the call to get_ctx() " + f"in the implementation registered with impl_abstract " + f"at {location}" + ) + + with torch._library.abstract_impl.set_ctx_getter(error_on_ctx): + return f(*args, **kwargs) + + self._lib.impl(self._opname, f_with_ctx, "Meta") + return f + + return inner + + def _check_can_register_backward(self): + def error(detail): + raise RuntimeError( + f"Cannot use torch._custom_ops APIs to register backward " + f"formula for {detail}. 
Got operator " + f"{self._qualname} with schema: {schema}" + ) + + schema = self._schema + if schema.kind() != SchemaKind.functional: + error("non-functional operator") + + rets = schema.returns + if not schema.returns: + error("operator with no returns") + + assert len(rets) > 0 + is_non_mutating_view = any( + r.annotation is not None and not r.annotation.is_write for r in rets + ) + if is_non_mutating_view: + error("operator that returns views") + + # We make assumptions about the schema's return types. + allowed_return_types = { + BaseType(BaseTy.int): "int", + BaseType(BaseTy.SymInt): "SymInt", + BaseType(BaseTy.bool): "bool", + BaseType(BaseTy.float): "float", + BaseType(BaseTy.Tensor): "Tensor", + ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]", + } + for ret in schema.returns: + if ret.type in allowed_return_types: + continue + error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})") + + def _check_doesnt_have_library_autograd_impl(self): + if self._registered_autograd_kernel_indirection: + return + + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"): + raise RuntimeError( + f"impl_backward/impl_save_for_backward: the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing registration to DispatchKey::CompositeImplicitAutograd." + f"CompositeImplicitAutograd operators do not need an autograd formula; " + f"instead, the operator will decompose into its constituents and those " + f"can have autograd formulas defined on them.") + + # We can improve this by adding "all Autograd keys", but + # realistically people will just be using this API for CPU/CUDA for now. 
+ for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]: + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key): + raise RuntimeError( + f"impl_backward/impl_save_for_backward: " + f"the operator {self._qualname} already has an Autograd kernel " + f"registered to DispatchKey::{key} vi a pre-existing " + f"torch.library or TORCH_LIBRARY registration. Please either " + f"remove those registrations or don't use the torch._custom_ops APIs") + + def _check_doesnt_have_library_meta_impl(self): + if self._has_impl("abstract"): + return + + # If the user's operator is CompositeExplicitAutograd, + # allow them to impl_abstract. This is being pragmatic + # (existing custom ops may have CompositeExplicitAutograd + # registration that don't work with Meta kernels, so this + # gives them an escape hatch). + if ( + _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd") + and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta") + ): + return + + # Otherwise, if the user's already has a Meta kernel or their + # op is CompositeImplicitAutograd or some other alias dispatch key, + # raise. + + # Special case for CompositeImplicitAutograd + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"): + raise RuntimeError( + f"impl_abstract(...): the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing registration to DispatchKey::CompositeImplicitAutograd." + f"CompositeImplicitAutograd operators do not need an abstract impl; " + f"instead, the operator will decompose into its constituents and those " + f"can have abstract impls defined on them.") + + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta"): + raise RuntimeError( + f"impl_abstract(...): the operator {self._qualname} " + f"already has an DispatchKey::Meta implementation via a " + f"pre-existing torch.library or TORCH_LIBRARY registration. 
" + f"Please either remove that registration or don't call impl_abstract.") + + # NOTE ["backward", "save_for_backward", and "autograd"] + # As a part of the explicit autograd API, a user must provide us + # a "save_for_backward" function and a "backward" function. + # When both of these have been provided, then we automatically + # construct the "autograd" kernel. + def _register_autograd_kernel(self): + assert self._has_impl("backward") + assert self._has_impl("save_for_backward") + kernel = construct_autograd_kernel( + self._schema, + self._output_differentiability, + self, + get_op(self._qualname), + self._get_impl("save_for_backward").func, + self._get_impl("backward").func) + self._register_impl("autograd", kernel) + + def impl_save_for_backward(self, _stacklevel=2): + r"""Register a function that tells us what to save for backward. + + Please see impl_backward for more details. + """ + def inner(f): + self._check_can_register_backward() + self._check_doesnt_have_library_autograd_impl() + if not self._registered_autograd_kernel_indirection: + self._register_autograd_kernel_indirection() + self._register_impl("save_for_backward", f, stacklevel=_stacklevel) + if self._has_impl("backward"): + self._register_autograd_kernel() + return inner + + def impl_backward(self, output_differentiability=None, _stacklevel=2): + r"""Registers a backward formula. + + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + In order for the CustomOp to work with autograd, you need to register + a backward formula. There are two pieces to this: + 1. You must give us a function to specify what to save for backward. + Call this the "save for backward" function. + 2. You must give us a function that computes gradients. Call this the + "backward" function. 
+ + Use `impl_save_for_backward` to define a "save for backward" function + that specifies what gets saved for backward. The function should accept + two arguments ``(inputs, output)`` and return the quantities to be saved + for backward. + + During runtime, when you call the CustomOp, PyTorch will invoke the + "save for backward" function with the inputs and output of the CustomOp. + + Use `impl_backward` to define the "backward" function. The backward + function must accept ``(ctx, saved, *grads)``: + - ``ctx`` is a context object where we may provide information + - ``saved`` is exactly what gets returned from the "save for backward" + function + - ``grads`` is one or more gradients. The number of gradients matches + the number of outputs of the CustomOp. + + The backward function must return a dict that maps the name of + an input to the CustomOp to its corresponding gradient. All inputs that + were declared to be Tensors in the CustomOp definition must be accounted + for in the dict. The gradient may be a Tensor or None. 
+ + """ + if output_differentiability is not None: + def yell(): + raise RuntimeError( + f"impl_backward(output_differentiability): expected " + f"output_differentiability to be a list of bools with " + f"length equal to the number of outputs of this CustomOp " + f"got: {output_differentiability}") + + if not isinstance(output_differentiability, list): + yell() + for diff in output_differentiability: + if not isinstance(diff, bool): + yell() + if len(self._schema.returns) != len(output_differentiability): + yell() + + def inner(f): + self._check_can_register_backward() + self._check_doesnt_have_library_autograd_impl() + if not self._registered_autograd_kernel_indirection: + self._register_autograd_kernel_indirection() + self._register_impl("backward", f, stacklevel=_stacklevel) + self._output_differentiability = output_differentiability + if self._has_impl("save_for_backward"): + self._register_autograd_kernel() + return inner + + +@dataclasses.dataclass +class FuncAndLocation: + func: typing.Callable + location: str + + +def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName): + overload_name = ( + "" if operator_name.overload_name is None else operator_name.overload_name + ) + return _C._dispatch_find_schema_or_throw( + f"{cpp_ns}::{str(operator_name.name)}", overload_name + ) + + +def validate_namespace(ns: str) -> None: + if "." in ns: + raise ValueError( + f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a ' + f"valid variable name)" + ) + if ns in RESERVED_NS: + raise ValueError( + f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, " + f"please choose something else. " + ) + +def validate_schema(schema: FunctionSchema) -> None: + if not torch._library.utils.is_functional_schema(schema): + raise ValueError( + f"custom_op only supports functional operators " + f"(ops that do not mutate any inputs, do not return " + f"views of the inputs, and has at least one return). 
" + f"Got the following non-functional schema: {schema}" + ) + + # For simplicity: don't allow self arguments + if schema.arguments.self_arg is not None: + raise ValueError( + f"custom_op does not support arguments named 'self'. Please " + f"rename your argument. Got: {schema}" + ) + + +def parse_qualname(qualname: str) -> typing.Tuple[str, str]: + names = qualname.split("::", 1) + if len(names) != 2: + raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. The " + f"operator name should look something like ns::foo") + if '.' in names[1]: + raise ValueError(f"The torch.custom_ops APIs do not handle overloads, " + f"i.e. operator names with '.' in them. " + f"Please name your operator something like ns::foo. " + f"Got: {qualname}") + return names[0], names[1] + + +def validate_device_type(device_type: str) -> None: + if device_type not in SUPPORTED_DEVICE_TYPE_TO_KEY: + raise ValueError( + f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type " + f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}." + ) + + +def supported_param(param: inspect.Parameter) -> bool: + return param.kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + ) + + +def validate_function_matches_schema( + schema: FunctionSchema, func: typing.Callable +) -> None: + sig = inspect.signature(func) + + if not all(supported_param(p) for _, p in sig.parameters.items()): + raise ValueError( + f"custom_op(..., manual_schema)(func): positional-only args, " + f"varargs, and kwargs are not supported. Please rewrite `func` " + f"to not have them. Got `func` with signature: {sig}" + ) + + if ( + any( + p.annotation is not inspect.Parameter.empty + for _, p in sig.parameters.items() + ) + or sig.return_annotation is not inspect.Signature.empty + ): + raise ValueError( + f"custom_op(..., manual_schema)(func): When passing in a manual " + f"schema, we expect `func` to have no type annotations to avoid " + f"ambiguity. 
Got `func` with signature: {sig}" + ) + + positional = [ + (name, param) + for name, param in sig.parameters.items() + if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + ] + kwargonly = [ + (name, param) + for name, param in sig.parameters.items() + if param.kind == inspect.Parameter.KEYWORD_ONLY + ] + + def error(): + raise ValueError( + f"custom_op(..., manual_schema)(func): When passing in a manual " + f"schema, we expect `func`'s signature to match `manual_schema` " + f"(aside from type annotations). " + f"func's signature: {sig}, manual_schema: {schema}" + ) + + def error_default_args(): + raise ValueError( + f"custom_op(..., manual_schema)(func): " + f"neither func nor manual_schema should have default " + f"arguments. Got " + f"func's signature: {sig}, manual_schema: {schema}" + ) + + def compare(sig_args, schema_args): + if len(sig_args) != len(schema_args): + error() + for (name, param), arg in zip(sig_args, schema_args): + if name != arg.name: + error() + if param.default is not inspect.Parameter.empty or arg.default is not None: + error_default_args() + + compare(positional, schema.arguments.flat_positional) + compare(kwargonly, schema.arguments.flat_kwarg_only) + + +def report_error_callback(custom_op: typing.Any, key: str) -> None: + if key == "Undefined": + raise NotImplementedError( + f"{custom_op}: There were no Tensor inputs to this operator " + f"(e.g. you passed an empty list of Tensors). If your operator is a " + f"factory function (that is, it takes no Tensors and constructs " + f"a new one), then please use CustomOp.impl_factory to register " + f"an implementation for it" + ) + if key == "Meta": + raise NotImplementedError( + f"{custom_op}: when running with device='Meta' tensors: there is no " + f"abstract impl registered for this CustomOp. 
Please register one via " + f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors" + ) + if key in ("CPU", "CUDA"): + device = key.lower() + raise NotImplementedError( + f"{custom_op}: when running with device='{device}' tensors: there is no " + f"{device} impl registered for this CustomOp. Please register one via " + f"CustomOp.impl(device_type='{device}')" + ) + raise NotImplementedError( + f"{custom_op}: No implementation for dispatch key {key}. It is likely " + f"that we have not added this functionality yet, please either open an " + f"issue or if you're feeling adventurous, use the low-level " + f"torch.library API" + ) + + +def custom_op_from_existing(op): + ns = op.namespace + lib = torch.library.Library(ns, "FRAGMENT") + name = op.name().split("::")[-1] + schema_str = str(op._schema) + # CustomOp expects the schema string without the namespace + schema_str = schema_str.split("::")[-1] + schema = FunctionSchema.parse(schema_str) + return CustomOp(lib, ns, schema, name, op, _private_access=True) + + +def get_op(qualname): + def error_not_found(): + raise ValueError( + f"Could not find the operator {qualname}. Please make sure you have " + f"already registered the operator and (if registered from C++) " + f"loaded it via torch.ops.load_library.") + + ns, name = parse_qualname(qualname) + if not hasattr(torch.ops, ns): + error_not_found() + opnamespace = getattr(torch.ops, ns) + if not hasattr(opnamespace, name): + error_not_found() + packet = getattr(opnamespace, name) + if not hasattr(packet, 'default'): + error_not_found() + return packet.default + + +def _find_custom_op(qualname, also_check_torch_library=False): + if qualname in global_registry: + return global_registry[qualname] + if not also_check_torch_library: + raise RuntimeError( + f'Could not find custom op "{qualname}". 
Did you register it via ' + f"the torch._custom_ops API?") + overload = get_op(qualname) + result = custom_op_from_existing(overload) + return result + + +def get_abstract_impl(qualname): + if qualname not in torch._custom_op.impl.global_registry: + return None + custom_op = torch._custom_op.impl.global_registry[qualname] + if custom_op is None: + return None + if not custom_op._has_impl("abstract"): + return None + return custom_op._get_impl("abstract").func + + +def _custom_op_with_schema(qualname, schema, needs_fixed_stride_order=True): + ns, name = qualname.split("::") + schema_str = f"{name}{schema}" + function_schema = FunctionSchema.parse(schema_str) + validate_schema(function_schema) + tags = [torch._C.Tag.needs_fixed_stride_order] if needs_fixed_stride_order else [] + lib = library.Library(ns, "FRAGMENT") + lib.define(schema_str, tags=tags) + ophandle = find_ophandle_or_throw(ns, function_schema.name) + result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True) + result._register_autograd_kernel_indirection() + + torch._C._dispatch_set_report_error_callback( + ophandle, functools.partial(report_error_callback, weakref.proxy(result)) + ) + return get_op(qualname) diff --git a/parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8cda1ca433010e638db4c749b5c89c104cd665a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2496c22e9e92909b5b195598282bae47f73a07f52940bd2129e955d1549ad5fe +size 124962 diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__init__.py b/parrot/lib/python3.10/site-packages/torch/_library/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..a417ec18fef1fab8107bf3ac282dd0beb64361ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/__init__.py @@ -0,0 +1,6 @@ +import torch._library.abstract_impl +import torch._library.autograd +import torch._library.simple_registry +import torch._library.utils + +from torch._library.fake_class_registry import register_fake_class diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84993f446933772ea5331cc811ad90b2bd20d0c8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/autograd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6280799a6b0b0a99204339ec4384751c9d6ea45 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/autograd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/custom_ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/custom_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2437a6dfa3ae9e15ab0449b066d595c121d80d49 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/custom_ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/fake_class_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/fake_class_registry.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..02790d0c9d7aa1758510bae3a9ea05c4f812e22f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/fake_class_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/infer_schema.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/infer_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc1dc8dd2f49054d3541c395e576a458df811212 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/infer_schema.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f6de9b65eccac345fa9993653cced737ba92241 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb76a4c52eb5ebf8878cae54264c63378e7c69e4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_library/abstract_impl.py b/parrot/lib/python3.10/site-packages/torch/_library/abstract_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..1f0f4c87bab7aee8186d4ba1a55c77ac2caafa7e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/abstract_impl.py @@ -0,0 +1,209 @@ +# mypy: allow-untyped-defs +import contextlib +import functools +from typing import Callable, Optional +from 
typing_extensions import deprecated + +import torch +from torch._library.utils import Kernel, RegistrationHandle + + +class AbstractImplHolder: + """A holder where one can register an fake impl to.""" + + def __init__(self, qualname: str): + self.qualname: str = qualname + self.kernel: Optional[Kernel] = None + self.lib: Optional[torch.library.Library] = None + + def register(self, func: Callable, source: str) -> RegistrationHandle: + """Register an fake impl. + + Returns a RegistrationHandle that one can use to de-register this + fake impl. + """ + if self.kernel is not None: + raise RuntimeError( + f"register_fake(...): the operator {self.qualname} " + f"already has an fake impl registered at " + f"{self.kernel.source}." + ) + if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"): + raise RuntimeError( + f"register_fake(...): the operator {self.qualname} " + f"already has an DispatchKey::Meta implementation via a " + f"pre-existing torch.library or TORCH_LIBRARY registration. " + f"Please either remove that registration or don't call " + f"register_fake." + ) + + if torch._C._dispatch_has_kernel_for_dispatch_key( + self.qualname, "CompositeImplicitAutograd" + ): + raise RuntimeError( + f"register_fake(...): the operator {self.qualname} " + f"already has an implementation for this device type via a " + f"pre-existing registration to " + f"DispatchKey::CompositeImplicitAutograd." + f"CompositeImplicitAutograd operators do not need an fake " + f"impl; " + f"instead, the operator will decompose into its constituents " + f"and those " + f"can have fake impls defined on them." 
+ ) + + # Store the kernel in this holder + self.kernel = Kernel(func, source) + + # Also register the fake impl to Meta key + if self.lib is None: + ns = self.qualname.split("::")[0] + self.lib = torch.library.Library(ns, "FRAGMENT") + meta_kernel = construct_meta_kernel(self.qualname, self) + self.lib.impl(self.qualname, meta_kernel, "Meta") + + def deregister_fake_class(): + if self.lib: + self.lib._destroy() + self.lib = None + self.kernel = None + + return RegistrationHandle(deregister_fake_class) + + +def construct_meta_kernel( + qualname: str, abstract_impl_holder: AbstractImplHolder +) -> Callable: + assert abstract_impl_holder.kernel is not None + + @functools.wraps(abstract_impl_holder.kernel.func) + def meta_kernel(*args, **kwargs): + assert abstract_impl_holder.kernel is not None + source = abstract_impl_holder.kernel.source + + def error_on_ctx(): + raise RuntimeError( + f"Attempted to call get_ctx() for the meta implementation " + f"for {qualname} (implemented at {source})" + f"You have presumably called get_ctx() because the operator " + f"has a data-dependent output shape; if so, there is no " + f"such meta implementation and this error is the correct " + f"behavior." + ) + + with set_ctx_getter(error_on_ctx): + return abstract_impl_holder.kernel(*args, **kwargs) + + return meta_kernel + + +def get_none(): + return None + + +global_ctx_getter: Callable = get_none + + +@contextlib.contextmanager +def set_ctx_getter(ctx_getter): + global global_ctx_getter + prev = global_ctx_getter + try: + global_ctx_getter = ctx_getter + yield + finally: + global_ctx_getter = prev + + +class AbstractImplCtx: + """ + Context object for writing fake implementations for custom operators. 
+ """ + + def __init__(self, _fake_mode, _op): + self._fake_mode = _fake_mode + self._shape_env = _fake_mode.shape_env + self._op = _op + + @deprecated( + "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead", + category=FutureWarning, + ) + def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt: + return self.new_dynamic_size(min=min, max=max) + + def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt: + """Constructs a new symint (symbolic int) representing a data-dependent value. + + This is useful for writing the fake implementation (which is necessary + for torch.compile) for a CustomOp where an output Tensor has a size + that depends on the data of the input Tensors. + + Args: + min (int): A statically known inclusive lower bound for this symint. Default: 0 + max (Optional[int]): A statically known inclusive upper bound for this + symint. Default: None + + .. warning: + + It is important that the ``min`` and ``max`` (if not None) values are set + correctly, otherwise, there will be undefined behavior under + torch.compile. The default value of ``min`` is 2 due to torch.compile + specializing on 0/1 sizes. + + You must also verify that your implementation on concrete Tensors + (e.g. CPU/CUDA) only returns Tensors where the size that corresponds + to the symint also has respects these constraint. + The easiest way to do this is to add an assertion in the CPU/CUDA/etc + implementation that the size follows these bounds. + + Example:: + + >>> # An operator with data-dependent output shape + >>> lib = torch.library.Library("mymodule", "FRAGMENT") + >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor") + >>> + >>> @torch.library.register_fake("mymodule::custom_nonzero") + >>> def _(x): + >>> # Number of nonzero-elements is data-dependent. + >>> # Since we cannot peek at the data in an fake impl, + >>> # we use the ctx object to construct a new symint that + >>> # represents the data-dependent size. 
+ >>> ctx = torch.library.get_ctx() + >>> nnz = ctx.new_dynamic_size() + >>> shape = [nnz, x.dim()] + >>> result = x.new_empty(shape, dtype=torch.int64) + >>> return result + >>> + >>> @torch.library.impl(lib, "custom_nonzero", "CPU") + >>> def _(x): + >>> x_np = x.numpy() + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> return torch.tensor(res, device=x.device) + + """ + if ( + self._shape_env is None + or not self._shape_env.allow_dynamic_output_shape_ops + ): + raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op) + + if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt): + raise ValueError( + f"ctx.new_dynamic_size(min={min}, max={max}): expected " + f"min and max to be statically known ints but got SymInt. " + f"This is not supported." + ) + + if min < 0: + raise ValueError( + f"ctx.new_dynamic_size(min={min}, ...): expected min to be " + f"greater than or equal to 0: this API can only create " + f"non-negative sizes." + ) + + result = self._shape_env.create_unbacked_symint() + torch.fx.experimental.symbolic_shapes._constrain_range_for_size( + result, min=min, max=max + ) + return result diff --git a/parrot/lib/python3.10/site-packages/torch/_library/autograd.py b/parrot/lib/python3.10/site-packages/torch/_library/autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..9001948d03d877d3871b8f0bc7f8bac7df714791 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/autograd.py @@ -0,0 +1,226 @@ +# mypy: allow-untyped-defs +import dataclasses +from dataclasses import dataclass +from typing import Any, Callable, Dict, Optional, Protocol + +from .. import _C, _ops, autograd, Tensor + +from ..utils import _pytree +from . 
import utils + + +class InfoProtocol(Protocol): + _backward_fn: Optional[Callable] + _setup_context_fn: Optional[Callable] + + +@dataclasses.dataclass +class Info: + _backward_fn: Optional[Callable] + _setup_context_fn: Optional[Callable] + + +def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable: + name: str = f"GeneratedBackwardFor_{op._namespace}_{op._opname}_{op._overloadname}" + + has_kwarg_only_args = utils.has_kwarg_only_args(op._schema) + + @dataclass + class Metadata: + keyset: _C.DispatchKeySet + keyword_only_args: Dict[str, Any] + + def forward(ctx, *args): + metadata = args[-1] + args = args[:-1] + + with _C._AutoDispatchBelowAutograd(): + keyset = metadata.keyset + kwargs = metadata.keyword_only_args + result = op.redispatch(keyset & _C._after_autograd_keyset, *args, **kwargs) + if info._setup_context_fn: + # The Dispatcher will remove args that are equal to their default + # values from (args, kwargs). We're going to add it back so that + # the user can access them. + # + # This is OK to do: The Dispatcher removed the args for serialization + # FC/BC reasons (that is, a graph will not store args that are equal + # to their default values), but that doesn't matter here. 
If the user + # adds a new default arg, then they must update + # their setup_context (along with the rest of their operator + # registrations) + args, kwargs = utils.fill_defaults(op._schema, args, kwargs) + + if has_kwarg_only_args: + info._setup_context_fn( + ctx=ctx, inputs=args, keyword_only_inputs=kwargs, output=result + ) + else: + info._setup_context_fn(ctx=ctx, inputs=args, output=result) + return result + + def backward(ctx, *grads): + if info._backward_fn: + try: + prev_needs_input_grad = ctx.needs_input_grad + ctx.needs_input_grad = ctx.needs_input_grad[:-1] + result = info._backward_fn(ctx, *grads) + finally: + ctx.needs_input_grad = prev_needs_input_grad + if isinstance(result, tuple): + return (*result, None) + return result, None + raise RuntimeError( + f"Trying to backward through {op} but no autograd " + f"formula was registered. " + f"Please use register_autograd to add one." + ) + + Generated = type( + name, + (autograd.Function,), + { + "forward": staticmethod(forward), + "backward": staticmethod(backward), + }, + ) + + schema = op._schema + if any( + utils.is_tensorlist_like_type(a.type) + for a in (*schema.arguments, *schema.returns) + ): + Generated = supports_tensorlist(Generated) + + # The dispatcher passes any keyword-only-args as kwargs and the + # rest of the args (even if specified as kwargs) as args. + def autograd_impl(keyset, *args, **keyword_only_args): + result = Generated.apply(*args, Metadata(keyset, keyword_only_args)) # type: ignore[attr-defined] + return result + + return autograd_impl + + +def supports_tensorlist(cls: Any) -> Any: + """Allows a given autograd.Function class to support List[Tensor] inputs/outputs. + + Regular autograd.Function has a constraint that it only directly supports autograd for + Tensors. Applying @supports_tensorlist enables an autograd.Function to support + autograd for List[Tensor] inputs and outputs. 
+ """ + orig_forward = cls.forward + orig_backward = cls.backward + orig_apply = cls.apply + + @dataclass + class Metadata: + input_spec: spec_t + output_spec: Optional[spec_t] = None + result_is_tuple: Optional[bool] = None + + def new_forward(ctx, *args): + metadata = args[-1] + args = args[:-1] + if not isinstance(metadata, Metadata): + raise NotImplementedError( + "NYI: calling supports_tensorlist autograd.Function.forward directly. " + "You should probably be calling .apply instead. " + "Please file an issue if not." + ) + args = unflatten(list(args), metadata.input_spec) + result = orig_forward(ctx, *args) + metadata.result_is_tuple = isinstance(result, tuple) + if not metadata.result_is_tuple: + result = (result,) + flat_result, output_spec = flatten(result, not_list_of_tensor) + metadata.output_spec = output_spec + + if hasattr(ctx, "_pt_metadata"): + raise RuntimeError( + "Please don't set ctx._pt_metadata; PyTorch uses it to store info" + ) + ctx._pt_metadata = metadata + + return tuple(flat_result) + + def new_backward(ctx, *grads): + if not hasattr(ctx, "_pt_metadata"): + raise NotImplementedError( + "NYI: calling supports_tensorlist autograd.Function.backward directly. " + "This will automatically get called by PyTorch autograd. " + "Please file an issue if you need this." + ) + + metadata = ctx._pt_metadata + grads = unflatten(list(grads), metadata.output_spec) + + # If the user's input is ([x, y, z], w), + # then needs_input_grad is (bool, bool, bool, bool, bool). + # We need to + # 1. get rid of the additional bool (which comes from the extra + # `metadata input`) + # 2. unflatten to get the right structure. 
+ prev_needs_input_grad = ctx.needs_input_grad + try: + ctx.needs_input_grad = unflatten( + list(ctx.needs_input_grad[:-1]), metadata.input_spec + ) + grad_inputs = orig_backward(ctx, *grads) + finally: + ctx.needs_input_grad = prev_needs_input_grad + + if not isinstance(grad_inputs, tuple): + grad_inputs = (grad_inputs,) + # Assume that any Nones in the backward are Tensors. + # If the forward has an arg that is [1, 2, 3], the backward should + # return None as the grad. + # If the forward has an arg that is [tensor, tensor], the backward + # may return [None, None], [grad, None], [None, grad], or [grad, grad]. + flat_grad_inputs, grad_inputs_spec = flatten( + grad_inputs, not_list_of_optional_tensor + ) + if grad_inputs_spec != metadata.input_spec: + raise RuntimeError( + f"Expected the return from backward to be of the same structure " + f"as the inputs. Got: {grad_inputs_spec} (return from backward), " + f"{metadata.input_spec} (inputs)" + ) + return tuple(flat_grad_inputs + [None]) + + def new_apply(*args): + flat_args, input_spec = flatten(args, is_leaf=not_list_of_tensor) + metadata = Metadata(input_spec) + result = orig_apply(*flat_args, metadata) # type: ignore[misc] + assert metadata.output_spec is not None + result = unflatten(list(result), metadata.output_spec) + if not metadata.result_is_tuple: + assert isinstance(result, tuple) + assert len(result) == 1 + return result[0] + return result + + cls.forward = new_forward + cls.backward = new_backward + cls.apply = new_apply + return cls + + +def not_list_of_tensor(tree): + if isinstance(tree, tuple): + return False + if isinstance(tree, list): + return any(not isinstance(l, Tensor) for l in tree) + return True + + +def not_list_of_optional_tensor(tree): + if isinstance(tree, tuple): + return False + if isinstance(tree, list): + return any(l is not None and not isinstance(l, Tensor) for l in tree) + return True + + +flatten = _pytree.tree_flatten +unflatten = _pytree.tree_unflatten +spec_t = 
_pytree.TreeSpec diff --git a/parrot/lib/python3.10/site-packages/torch/_library/custom_ops.py b/parrot/lib/python3.10/site-packages/torch/_library/custom_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..ce692f16a097c1a29dc35098052eb3448e8e866f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/custom_ops.py @@ -0,0 +1,573 @@ +# mypy: allow-untyped-defs +import inspect +import weakref +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, +) + +from torch.utils._exposed_in import exposed_in + +from .. import _C, _library, _ops, autograd, library, Tensor +from . import utils + + +device_types_t = Optional[Union[str, Sequence[str]]] + + +@exposed_in("torch.library") +def custom_op( + name: str, + fn: Optional[Callable] = None, + /, + *, + mutates_args: Iterable[str], + device_types: device_types_t = None, + schema: Optional[str] = None, +) -> Callable: + """Wraps a function into custom operator. + + Reasons why you may want to create a custom op include: + - Wrapping a third-party library or custom kernel to work with PyTorch + subsystems like Autograd. + - Preventing torch.compile/export/FX tracing from peeking inside your function. + + This API is used as a decorator around a function (please see examples). + The provided function must have type hints; these are needed to interface + with PyTorch's various subsystems. + + Args: + name (str): A name for the custom op that looks like "{namespace}::{name}", + e.g. "mylib::my_linear". The name is used as the op's stable identifier + in PyTorch subsystems (e.g. torch.export, FX graphs). + To avoid name collisions, please use your project name as the namespace; + e.g. all custom ops in pytorch/fbgemm use "fbgemm" as the namespace. + mutates_args (Iterable[str]): The names of args that the function mutates. + This MUST be accurate, otherwise, the behavior is undefined. 
+ device_types (None | str | Sequence[str]): The device type(s) the function + is valid for. If no device type is provided, then the function + is used as the default implementation for all device types. + Examples: "cpu", "cuda". + schema (None | str): A schema string for the operator. If None + (recommended) we'll infer a schema for the operator from its type + annotations. We recommend letting us infer a schema unless you + have a specific reason not to. + Example: "(Tensor x, int y) -> (Tensor, Tensor)". + + .. note:: + We recommend not passing in a ``schema`` arg and instead letting us infer + it from the type annotations. It is error-prone to write your own schema. + You may wish to provide your own schema if our interpretation of + the type annotation is not what you want. + For more info on how to write a schema string, see + `here `_ + + Examples:: + >>> import torch + >>> from torch import Tensor + >>> from torch.library import custom_op + >>> import numpy as np + >>> + >>> @custom_op("mylib::numpy_sin", mutates_args=()) + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> x_np = x.cpu().numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> x = torch.randn(3) + >>> y = numpy_sin(x) + >>> assert torch.allclose(y, x.sin()) + >>> + >>> # Example of a custom op that only works for one device type. 
+ >>> @custom_op("mylib::numpy_sin_cpu", mutates_args=(), device_types="cpu") + >>> def numpy_sin_cpu(x: Tensor) -> Tensor: + >>> x_np = x.numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np) + >>> + >>> x = torch.randn(3) + >>> y = numpy_sin_cpu(x) + >>> assert torch.allclose(y, x.sin()) + >>> + >>> # Example of a custom op that mutates an input + >>> @custom_op("mylib::numpy_sin_inplace", mutates_args={"x"}, device_types="cpu") + >>> def numpy_sin_inplace(x: Tensor) -> None: + >>> x_np = x.numpy() + >>> np.sin(x_np, out=x_np) + >>> + >>> x = torch.randn(3) + >>> expected = x.sin() + >>> numpy_sin_inplace(x) + >>> assert torch.allclose(x, expected) + + """ + + def inner(fn): + import torch + + if schema is None: + import torch._custom_op.impl + + schema_str = torch._custom_op.impl.infer_schema(fn, mutates_args) + else: + schema_str = schema + namespace, opname = name.split("::") + result = CustomOpDef(namespace, opname, schema_str, fn) + if schema is not None: + # Check that schema's alias annotations match those of `mutates_args`. + expected = set() + for arg in result._opoverload._schema.arguments: + if arg.alias_info is not None and arg.alias_info.is_write: + expected.add(arg.name) + if expected != set(mutates_args): + raise ValueError( + f"Attempted to create a custom op with `mutates_args={mutates_args}` " + f"and `schema={schema}. The schema suggests that the op mutates {expected}" + f"which is different from what was provided to us in `mutates_args`. " + f"Please make these consistent." + ) + result.register_kernel(device_types)(fn) + return result + + if fn is None: + return inner + return inner(fn) + + +class CustomOpDef: + """CustomOpDef is a wrapper around a function that turns it into a custom op. + + It has various methods for registering additional behavior for this + custom op. + + You should not instantiate CustomOpDef directly; instead, use the + :func:`torch.library.custom_op` API. 
+ """ + + def __init__(self, namespace: str, name: str, schema: str, fn: Callable) -> None: + # Fields used to interface with the PyTorch dispatcher + self._namespace = namespace + self._name = name + self._schema = schema + + self._init_fn = fn + + self._backend_fns: Dict[Union[str, None], Callable] = {} + self._abstract_fn: Optional[Callable] = None + self._setup_context_fn: Optional[Callable] = None + self._backward_fn: Optional[Callable] = None + + self._lib = get_library_allowing_overwrite(self._namespace, self._name) + self._register_to_dispatcher() + OPDEFS[self._qualname] = self + + @property + def _qualname(self) -> str: + return f"{self._namespace}::{self._name}" + + def __repr__(self) -> str: + return f"" + + def register_kernel( + self, device_types: device_types_t, fn: Optional[Callable] = None, / + ) -> Callable: + """Register an implementation for a device type for this operator. + + Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu". + This API may be used as a decorator. + + Args: + fn (Callable): The function to register as the implementation for + the given device types. + device_types (str | Sequence[str]): The device device_types to register an impl to. 
+ + Examples:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import torch + >>> from torch import Tensor + >>> from torch.library import custom_op + >>> import numpy as np + >>> + >>> # Create a custom op that works on cpu + >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> x_np = x.numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np) + >>> + >>> # Add implementations for the cuda device + >>> @numpy_sin.register_kernel("cuda") + >>> def _(x): + >>> x_np = x.cpu().numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> x_cpu = torch.randn(3) + >>> x_cuda = x_cpu.cuda() + >>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin()) + >>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin()) + + """ + + def inner(fn): + if device_types is None or isinstance(device_types, str): + dtypes: List[Union[str, None]] = [device_types] + else: + dtypes = list(device_types) + for device_type in dtypes: + if device_type not in self._backend_fns: + + def backend_impl(*args, **kwargs): + # Checks the assumption that outputs cannot alias + # inputs or other outputs. + storages = { + id(tensor.untyped_storage()) + for tensor in iter_tensors(args, kwargs) + } + + result = self._backend_fns[device_type](*args, **kwargs) + + tuple_result = result + if not isinstance(result, tuple): + tuple_result = (result,) + for tensor in iter_tensors(tuple_result, {}): + key = id(tensor.untyped_storage()) + if id(tensor.untyped_storage()) in storages: + fn = self._backend_fns[device_type] + module = inspect.getmodule(fn) + raise RuntimeError( + f"Tensors returned from custom ops (1) must not " + f"be inputs to the custom op and (2) may not alias " + f"any inputs or other returns. Please clone the " + f"the offending output tensors (e.g. output.clone()) " + f"or refactor your code. 
" + f"Offending op: {self._name} (with implementation in {module})" + ) + storages.add(key) + return result + + if device_type is None: + self._lib.impl( + self._name, backend_impl, "CompositeExplicitAutograd" + ) + else: + self._lib.impl( + self._name, + backend_impl, + _C._dispatch_key_for_device(device_type), + ) + self._backend_fns[device_type] = fn + return fn + + # See NOTE: [Supporting decorator and non-decorator usage] + if fn is None: + return inner + return inner(fn) + + def register_fake(self, fn: Callable, /) -> Callable: + r"""Register a FakeTensor implementation for this custom op. + + This is necessary to get the operator to work efficiently with torch.compile. + + The Fake impl (sometimes also known as a meta kernel or abstract impl) + specifies the behavior of this operator on Tensors that carry no data. + Given some input Tensors with certain properties + (sizes/strides/storage_offset/device), it specifies what the properties of + the output Tensors are. + + Please see :func:`torch.library.impl_abstract` for more details. + + Args: + fn (Callable): The function to register as the FakeTensor + implementation. 
+ + Examples: + >>> import torch + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Example 1: an operator without data-dependent output shape + >>> @torch.library.custom_op("mylib::linear", mutates_args=()) + >>> def linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: + >>> return (x @ weight.t()) + bias + >>> + >>> @linear.register_fake + >>> def _(x, weight, bias): + >>> assert x.dim() == 2 + >>> assert weight.dim() == 2 + >>> assert bias.dim() == 1 + >>> assert x.shape[1] == weight.shape[1] + >>> assert weight.shape[0] == bias.shape[0] + >>> assert x.device == weight.device + >>> return x.new_empty(x.size(0), weight.size(0)) + >>> + >>> x = torch.randn(2, 2) + >>> weight = torch.randn(2, 2) + >>> bias = torch.randn(2) + >>> # xdoctest: +SKIP("Requires Python <= 3.11") + >>> out = torch.compile(linear, fullgraph=True)(x, weight, bias) + >>> # xdoctest: +SKIP("Requires Python <= 3.11") + >>> assert torch.allclose(out, torch.nn.functional.linear(x, weight, bias)) + >>> + >>> # Example 2: an operator with data-dependent output shape + >>> @torch.library.custom_op("mylib::nonzero", mutates_args=()) + >>> def nonzero(x: Tensor) -> Tensor: + >>> x_np = x.cpu().numpy() + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> return torch.tensor(res, device=x.device) + >>> + >>> @nonzero.register_fake + >>> def _(x): + >>> # Number of nonzero-elements is data-dependent. + >>> # Since we cannot peek at the data in an abstract impl, + >>> # we use the ctx object to construct a new symint that + >>> # represents the data-dependent size. 
+ >>> ctx = torch.library.get_ctx() + >>> nnz = ctx.new_dynamic_size() + >>> shape = [nnz, x.dim()] + >>> result = x.new_empty(shape, dtype=torch.int64) + >>> return result + >>> + >>> x = torch.tensor([0, 1, 2, 0, 0, 1]) + >>> # xdoctest: +SKIP("Requires Python <= 3.11") + >>> out = torch.compile(nonzero, fullgraph=True)(x) + >>> # xdoctest: +SKIP("Requires Python <= 3.11") + >>> assert torch.allclose(out, x.nonzero()) + + """ + self._abstract_fn = fn + return fn + + def register_autograd( + self, + backward: Callable, + /, + *, + setup_context: Optional[Callable] = None, + ) -> None: + r"""Register a backward formula for this custom op. + + In order for an operator to work with autograd, you need to register + a backward formula: + 1. You must tell us how to compute gradients during the backward pass + by providing us a "backward" function. + 2. If you need any values from the forward to compute gradients, you can + use `setup_context` to save values for backward. + + ``backward_fn`` runs during the backward pass. It accepts ``(ctx, *grads)``: + - ``grads`` is one or more gradients. The number of gradients matches + the number of outputs of the operator. + The ``ctx`` object is `the same ctx object `_ used by + :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the + same as :meth:`torch.autograd.Function.backward`. + + ``setup_context(ctx, inputs, output)`` runs during the forward pass. + Please save quantities needed for backward onto the ``ctx`` object via + either :meth:`torch.autograd.function.FunctionCtx.save_for_backward` + or assigning them as attributes of ``ctx``. If your custom op has + kwarg-only arguments, we expect the signature of ``setup_context`` + to be ``setup_context(ctx, inputs, keyword_only_inputs, output)``. + + Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is, + they may not directly access :meth:`torch.Tensor.data_ptr` and they must + not depend on or mutate global state. 
If you need a non-traceable backward, + you can make it a separate custom_op that you call inside ``backward_fn``. + + Examples: + >>> import torch + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=()) + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> x_np = x.cpu().numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> def setup_context(ctx, inputs, output) -> Tensor: + >>> x, = inputs + >>> ctx.save_for_backward(x) + >>> + >>> def backward(ctx, grad): + >>> x, = ctx.saved_tensors + >>> return grad * x.cos() + >>> + >>> numpy_sin.register_autograd(backward, setup_context=setup_context) + >>> + >>> x = torch.randn(3, requires_grad=True) + >>> y = numpy_sin(x) + >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y)) + >>> assert torch.allclose(grad_x, x.cos()) + >>> + >>> # Example with a keyword-only arg + >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=()) + >>> def numpy_mul(x: Tensor, *, val: float) -> Tensor: + >>> x_np = x.cpu().numpy() + >>> y_np = x_np * val + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> def setup_context(ctx, inputs, keyword_only_inputs, output) -> Tensor: + >>> ctx.val = keyword_only_inputs["val"] + >>> + >>> def backward(ctx, grad): + >>> return grad * ctx.val + >>> + >>> numpy_mul.register_autograd(backward, setup_context=setup_context) + >>> + >>> x = torch.randn(3, requires_grad=True) + >>> y = numpy_mul(x, val=3.14) + >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y)) + >>> assert torch.allclose(grad_x, torch.full_like(x, 3.14)) + + """ + schema = self._opoverload._schema + if not _library.utils.is_functional_schema(schema): + raise RuntimeError( + f"Cannot register autograd formula for non-functional operator " + f"{self} with schema {schema}. Please create " + f"a functional operator and register an autograd formula for that." 
+ ) + + self._backward_fn = backward + self._setup_context_fn = setup_context + + def _register_to_dispatcher(self) -> None: + lib = self._lib + schema_str = self._name + self._schema + cpp_schema = _C.parse_schema(schema_str) + if utils.has_kwarg_only_tensors(cpp_schema): + # If you want to support this, the progression is: + # - supporting kwarg-only Tensors that are non-differentiable + # - supporting kwarg-only Tensors (regardless of differentiability) + raise NotImplementedError( + f"custom_op with kwarg-only Tensor args. Please make your " + f"tensors not kwarg-only. Got: {schema_str}" + ) + + lib.define( + schema_str, + tags=[_C.Tag.pt2_compliant_tag, _C.Tag.needs_fixed_stride_order], + ) + self._opoverload = _library.utils.lookup_op(self._qualname) + + def fake_impl(*args, **kwargs): + if self._abstract_fn is None: + if _library.utils.can_generate_trivial_fake_impl(self._opoverload): + return None + raise RuntimeError( + f"There was no fake impl registered for {self}. " + f"This is necessary for torch.compile/export/fx tracing to work. " + f"Please use `{self._init_fn.__name__}.register_fake` to add an " + f"fake impl." 
+ ) + return self._abstract_fn(*args, **kwargs) + + lib._register_fake(self._name, fake_impl, _stacklevel=4) + + autograd_impl = _library.autograd.make_autograd_impl(self._opoverload, self) + lib.impl(self._name, autograd_impl, "Autograd", with_keyset=True) + + schema = self._opoverload._schema + if schema.is_mutable: + + def adinplaceorview_impl(keyset, *args, **kwargs): + for arg, val in _library.utils.zip_schema(schema, args, kwargs): + if not arg.alias_info: + continue + if not arg.alias_info.is_write: + continue + if isinstance(val, Tensor): + autograd.graph.increment_version(val) + elif isinstance(val, (tuple, list)): + for v in val: + if isinstance(v, Tensor): + autograd.graph.increment_version(v) + with _C._AutoDispatchBelowADInplaceOrView(): + return self._opoverload.redispatch( + keyset & _C._after_ADInplaceOrView_keyset, *args, **kwargs + ) + + lib.impl( + self._name, + adinplaceorview_impl, + "ADInplaceOrView", + with_keyset=True, + ) + + def __call__(self, *args, **kwargs): + return self._opoverload(*args, **kwargs) + + +# NOTE: [Supporting decorator and non-decorator usage] +# +# Some APIs may be both used as a decorator and not as a decorator. +# For example: +# +# >>> def fn(x): +# >>> return x.sin() +# >>> +# >>> # Usage 1: not as a decorator +# >>> numpy_sin.register_kernel("cuda", fn) +# >>> +# >>> # Usage 2: as a decorator +# >>> @numpy_sin.register_kernel("cuda") +# >>> def fn2(x): +# >>> return x.sin +# +# The way we support this is that `register_kernel` accepts an optional `fn`. +# If `fn` is provided (Usage 1), then we know that the user is using it not +# as a decorator. +# If `fn` is not provided (Usage 2), then `register_kernel` needs to return a +# decorator. 
+ + +OPDEF_TO_LIB: Dict[str, "library.Library"] = {} +OPDEFS: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + + +def get_library_allowing_overwrite(namespace: str, name: str) -> "library.Library": + qualname = f"{namespace}::{name}" + + if qualname in OPDEF_TO_LIB: + OPDEF_TO_LIB[qualname]._destroy() + del OPDEF_TO_LIB[qualname] + + lib = library.Library(namespace, "FRAGMENT") + OPDEF_TO_LIB[qualname] = lib + return lib + + +def iter_tensors( + args: Tuple[Any], kwargs: Dict[str, Any], allowed_nesting: int = 1 +) -> Iterator[Tensor]: + def check(arg): + if isinstance(arg, Tensor): + yield arg + elif allowed_nesting > 0 and isinstance(arg, (tuple, list)): + yield from iter_tensors(tuple(arg), {}, allowed_nesting - 1) + + for arg in args: + yield from check(arg) + for kwarg in kwargs.values(): + yield from check(kwarg) + + +def _maybe_get_opdef( + op: Union[CustomOpDef, _ops.OpOverload, str] +) -> Optional[CustomOpDef]: + if isinstance(op, CustomOpDef): + return op + if isinstance(op, _ops.OpOverload): + op = op._name + assert isinstance(op, str) + if op in OPDEFS: + return OPDEFS[op] + return None diff --git a/parrot/lib/python3.10/site-packages/torch/_library/fake_class_registry.py b/parrot/lib/python3.10/site-packages/torch/_library/fake_class_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..f206b68fc3be2bb7975b1e52d6c305a284699bbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/fake_class_registry.py @@ -0,0 +1,293 @@ +# mypy: allow-untyped-defs +import logging +from typing import Any, Dict, Optional, Protocol, Tuple + +import torch + +from torch._library.utils import parse_namespace + +log = logging.getLogger(__name__) + + +class FakeScriptObject: + def __init__(self, wrapped_obj: Any, script_class_name: str): + self.wrapped_obj = wrapped_obj + + # The fully qualified name of the class of original script object + self.script_class_name = script_class_name + + +class FakeScriptMethod: + def 
class FakeClassRegistry:
    """Mapping from a fully qualified torchbind class name
    (e.g. ``__torch__.torch.classes.ns.Klass``) to its registered fake class.

    Registration is last-writer-wins: re-registering logs a warning instead of
    raising, and deregistering a missing entry logs a warning and returns None.
    """

    def __init__(self):
        # full_qualname -> fake class (any object the caller registered)
        self._registered_class: Dict[str, Any] = {}

    def has_impl(self, full_qualname: str) -> bool:
        """Return True if a fake class is registered under ``full_qualname``."""
        return full_qualname in self._registered_class

    def get_impl(self, full_qualname: str) -> Any:
        """Return the registered fake class; raise RuntimeError if missing."""
        self._check_registered(full_qualname)
        return self._registered_class[full_qualname]

    def register(self, full_qualname: str, fake_class=None) -> None:
        """Register ``fake_class``, warning (not erroring) on overwrite."""
        if self.has_impl(full_qualname):
            # Fixed typo in the log message: "overrided" -> "overridden".
            log.warning(
                "%s is already registered. Previous fake class is overridden with %s.",
                full_qualname,
                fake_class,
            )
        self._registered_class[full_qualname] = fake_class

    def deregister(self, full_qualname: str) -> Any:
        """Remove and return the fake class registered under ``full_qualname``.

        Logs a warning and returns None if nothing was registered (e.g. a
        double-deregister).
        """
        if not self.has_impl(full_qualname):
            # Fixed typos: "do you dereigster" -> "did you deregister".
            log.warning(
                "Cannot deregister %s. Please use register_fake_class to register it first."
                " Or did you deregister it twice?",
                full_qualname,
            )
            return None
        return self._registered_class.pop(full_qualname)

    def clear(self) -> None:
        """Drop all registrations."""
        self._registered_class.clear()

    def _check_registered(self, full_qualname: str) -> None:
        # Centralized check so get_impl always raises an actionable message.
        if full_qualname not in self._registered_class:
            raise RuntimeError(
                f"{full_qualname} is not registered. Please use register_fake_class to register it first."
            )
+def _check_valid_flat_script_obj(flat_x): + if not isinstance(flat_x, tuple): + raise RuntimeError("Expect flat x to be a tuple.") + + for tp in flat_x: + if not isinstance(tp, tuple): + raise RuntimeError("Expect flat x to be a tuple of tuples.") + + if not len(tp) == 2 or not isinstance(tp[0], str): + raise RuntimeError( + "Expect element of flat x to be a tuple of two elements with first element being a string" + ) + + +def to_fake_obj(fake_mode, x: torch.ScriptObject) -> FakeScriptObject: + import torch.utils._pytree as pytree + + flat_x = x.__obj_flatten__() # type: ignore[attr-defined] + + _check_valid_flat_script_obj(flat_x) + + fake_flattened = pytree.tree_map_only( + torch.Tensor, + lambda t: fake_mode.from_tensor(t), + flat_x, + ) + + fake_x = _find_fake_class_for_script_object(x).__obj_unflatten__(fake_flattened) + + fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name()) # type: ignore[attr-defined] + + for name in x._method_names(): # type: ignore[attr-defined] + attr = getattr(fake_x, name, None) + if attr: + if not callable(attr): + raise RuntimeError(f"Expect {name} to be a callable but got {attr}.") + + real_attr = getattr(x, name) # type: ignore[attr-defined] + + # real attr sometimes is not torch.ScriptMethod thus doesn't have schema e.g. __init___ or __eq__ + method_schema: Optional[torch.FunctionSchema] = None + if isinstance(real_attr, torch.ScriptMethod): + method_schema = real_attr.schema # type: ignore[attr-defined] + + setattr( + fake_x_wrapped, + name, + FakeScriptMethod(fake_x_wrapped, name, method_schema), + ) + else: + log.warning("fake object of %s doesn't implement method %s.", x, name) + return fake_x_wrapped + + +def register_fake_class(qualname, fake_class: Optional[HasStaticMethodFromReal] = None): + r"""Register a fake implementation for this class. 
+ + It's in the same spirit of registering a fake implementation for + an operator but with the difference that it + associates a fake class with the original torch bind class (registered + with torch::class_). In this way, torch.compile can handle them properly + in components such as Dynamo and AOTAutograd. + + This API may be used as a decorator (see example). For the fake class, users + are required to provide a from_real classmethod that takes a real object and + returns an instance of the fake class. All tensors in the fake object should also + be properly fakified with to_fake_tensor() in from_real. + + + Examples: + # For a custom class Foo defined in test_custom_class_registration.cpp: + + TORCH_LIBRARY(_TorchScriptTesting, m) { + m.class_("_TensorQueue") + .def(torch::init()) + .def("push", &TensorQueue::push) + .def("pop", &TensorQueue::pop) + .def("top", &TensorQueue::top) + .def("size", &TensorQueue::size) + .def("clone_queue", &TensorQueue::clone_queue) + .def("__obj_flatten__", &TensorQueue::__obj_flatten__) + .def_pickle( + // __getstate__ + [](const c10::intrusive_ptr& self) + -> c10::Dict { + return self->serialize(); + }, + // __setstate__ + [](c10::Dict data) + -> c10::intrusive_ptr { + return c10::make_intrusive(std::move(data)); + }); + }; + # We could register a fake class FakeTensorQueue in Python as follows: + import torch + + @torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue") + class FakeTensorQueue: + def __init__(self, queue): + self.queue = queue + + @classmethod + def __obj_unflatten__(cls, flattened_ctx): + return cls(**dict(ctx)) + + def push(self, x): + self.queue.append(x) + + def pop(self): + return self.queue.pop(0) + + def size(self): + return len(self.queue) + + In this example, the original TensorQeue need to addd a __obj_flatten__ method + to the class TensorQueue and the flattend result is passed into FakeTensorQueue's + __obj_unflatten__ as inputs to create a fake class. 
This protocol allows pytorch to look + at the contents of the script object and properly handle them in the subsystems + like dynamo, aot_aotugrad or more. + """ + + def inner(fake_class: HasStaticMethodFromReal): + ns, name = parse_namespace(qualname) + + # This also checks whether the refered torch::class_ exists. + torchbind_class = torch._C._get_custom_class_python_wrapper(ns, name) + + from_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None) + if not from_method: + raise RuntimeError( + f"{fake_class} doesn't define a classmethod {_CONVERT_FROM_REAL_NAME}." + ) + + if not isinstance(fake_class.__dict__[_CONVERT_FROM_REAL_NAME], classmethod): + raise RuntimeError( + f"{_CONVERT_FROM_REAL_NAME} method is not a classmethod." + ) + + global_fake_class_registry.register(_full_qual_class_name(qualname), fake_class) + return fake_class + + if fake_class is None: + return inner + return inner(fake_class) + + +def deregister_fake_class(qualname): + return global_fake_class_registry.deregister(_full_qual_class_name(qualname)) + + +def has_fake_class(full_qualname) -> bool: + return global_fake_class_registry.has_impl(full_qualname) + + +def find_fake_class(full_qualname) -> Optional[Any]: + if not has_fake_class(full_qualname): + return None + return global_fake_class_registry.get_impl(full_qualname) + + +def _full_qual_class_name(qualname: str) -> str: + ns, name = parse_namespace(qualname) + return "__torch__.torch.classes." + ns + "." + name + + +# Return the namespace and class name from fully qualified name. 
+def _ns_and_class_name(full_qualname: str) -> Tuple[str, str]: + splits = full_qualname.split(".") + assert len(splits) == 5 + _torch, torch_ns, classes, ns, class_name = splits + return ns, class_name + + +def _find_fake_class_for_script_object(x: torch.ScriptObject) -> Any: + full_qualname = x._type().qualified_name() # type: ignore[attr-defined] + ns, class_name = _ns_and_class_name(full_qualname) + fake_class = find_fake_class(full_qualname) + if fake_class is None: + raise RuntimeError( + f" ScriptObject's {full_qualname} haven't registered a fake class." + f" Please use register_fake_class({ns}::{class_name}) to annotate a fake class for the script obj." + f" Specifically, create a python class that implements a fake version for all the methods" + f" that're used in the program and put annotated class in the program e.g. after loading the library." + f" The fake methods can be written in the same way as a meta kernel for an operator but need to additionally" + f" simulate the object's states. Be sure to add a {_CONVERT_FROM_REAL_NAME} classmethod" + f" to enable creating a fake obj from a real one." + ) + return fake_class + + +_CONVERT_FROM_REAL_NAME = "__obj_unflatten__" + + +def _fake_obj_from_real(fake_mode, x) -> Any: + fake_class = _find_fake_class_for_script_object(x) + + from_real_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None) + if not from_real_method: + raise RuntimeError( + f"{fake_class} must define a classmethod {_CONVERT_FROM_REAL_NAME}" + f" that converts the real object to the fake object." + ) + + # from_real defined by user need the ctx to fakify the tensor states. 
def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str:
    """Given a function with type hints, parses a schema.

    We make some assumptions to make our lives easier that correspond to how people
    write custom ops in real life:
    - none of the outputs alias any of the inputs or each other.
    - only the args listed in mutates_args are being mutated.

    Callers (e.g. the custom ops API) are responsible for checking these assumptions.

    Args:
        prototype_function: function whose parameter/return annotations are
            mapped to schema types via SUPPORTED_PARAM_TYPES / parse_return.
        mutates_args: names of parameters the op mutates. These must map to
            Tensor-flavored schema types and receive alias-write annotations
            (e.g. ``Tensor(a0!)``) in the emitted schema.

    Returns:
        The schema string, e.g. ``"(Tensor x, SymInt n=1) -> Tensor"``.

    Raises:
        ValueError: on unsupported parameter kinds, missing or unsupported
            annotations, unsupported default values, or mutates_args entries
            that do not appear in the signature.
    """
    sig = inspect.signature(prototype_function)

    def error_fn(what):
        # All validation failures funnel through here so every message
        # carries the offending signature.
        raise ValueError(
            f"infer_schema(func): {what} " f"Got func with signature {sig})"
        )

    params = []
    seen_args = set()
    saw_kwarg_only_arg = False
    for idx, (name, param) in enumerate(sig.parameters.items()):
        if not supported_param(param):
            error_fn("We do not support positional-only args, varargs, or varkwargs.")

        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            # The first time we see a kwarg-only arg, add "*" to the schema.
            if not saw_kwarg_only_arg:
                params.append("*")
                saw_kwarg_only_arg = True

        if param.annotation is inspect.Parameter.empty:
            error_fn(f"Parameter {name} must have a type annotation.")

        if param.annotation not in SUPPORTED_PARAM_TYPES.keys():
            error_fn(
                f"Parameter {name} has unsupported type {param.annotation}. "
                f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
            )

        schema_type = SUPPORTED_PARAM_TYPES[param.annotation]
        if name in mutates_args:
            # Mutation is expressed via an alias-write annotation; only
            # Tensor-flavored schema types can carry one.
            if not schema_type.startswith("Tensor"):
                error_fn(
                    f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated"
                )
            # Rewrite e.g. "Tensor" -> "Tensor(a0!)"; idx keeps alias sets unique.
            schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}"
        seen_args.add(name)
        if param.default is inspect.Parameter.empty:
            params.append(f"{schema_type} {name}")
        else:
            # Schema defaults are restricted to literals the dispatcher parses.
            if param.default is not None and not isinstance(
                param.default, (int, float, bool)
            ):
                error_fn(
                    f"Parameter {name} has an unsupported default value (we only support "
                    f"int, float, bool, None). Please file an issue on GitHub so we can "
                    f"prioritize this."
                )
            params.append(f"{schema_type} {name}={param.default}")
    # Every declared mutated arg must actually appear in the signature.
    mutates_args_not_seen = set(mutates_args) - seen_args
    if len(mutates_args_not_seen) > 0:
        error_fn(
            f"{mutates_args_not_seen} in mutates_args were not found in "
            f"the custom op's signature. "
            f"mutates_args should contain the names of all args that the "
            f"custom op mutates."
        )
    ret = parse_return(sig.return_annotation, error_fn)
    return f"({', '.join(params)}) -> {ret}"
def supported_param(param: inspect.Parameter) -> bool:
    """Return True for parameter kinds the schema inferrer can express.

    Only plain (positional-or-keyword) and keyword-only parameters are
    supported; positional-only, ``*args`` and ``**kwargs`` are rejected.
    """
    supported_kinds = {
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    }
    return param.kind in supported_kinds
+ """ + + def __init__(self): + self._data = {} + + def find(self, qualname: str) -> "SimpleOperatorEntry": + if qualname not in self._data: + self._data[qualname] = SimpleOperatorEntry(qualname) + return self._data[qualname] + + +singleton: SimpleLibraryRegistry = SimpleLibraryRegistry() + + +class SimpleOperatorEntry: + """This is 1:1 to an operator overload. + + The fields of SimpleOperatorEntry are Holders where kernels can be + registered to. + """ + + def __init__(self, qualname: str): + self.qualname: str = qualname + self.abstract_impl: AbstractImplHolder = AbstractImplHolder(qualname) diff --git a/parrot/lib/python3.10/site-packages/torch/_library/utils.py b/parrot/lib/python3.10/site-packages/torch/_library/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..27d1ef92b5b3dc9bd289207c07f888323189ec3a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_library/utils.py @@ -0,0 +1,258 @@ +# mypy: allow-untyped-defs +import dataclasses +import inspect +import sys +from typing import Any, Callable, Dict, Iterable, Tuple + +import torch +import torch._utils_internal as _utils_internal +from torch import _C + + +@dataclasses.dataclass +class Kernel: + """Models a (function, source location)""" + + func: Callable + source: str + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + +class RegistrationHandle: + """Does something when someone calls .destroy() on it""" + + def __init__(self, on_destroy: Callable): + self._on_destroy = on_destroy + + def destroy(self) -> None: + self._on_destroy() + + +def get_source(stacklevel: int) -> str: + """Get a string that represents the caller. + + Example: "/path/to/foo.py:42" + + Use stacklevel=1 to get the caller's source + Use stacklevel=2 to get the caller's caller's source + etc. 
+ """ + frame = inspect.getframeinfo(sys._getframe(stacklevel)) + source = f"{frame.filename}:{frame.lineno}" + return source + + +def parse_namespace(qualname: str) -> Tuple[str, str]: + splits = qualname.split("::") + if len(splits) != 2: + raise ValueError( + f"Expected `qualname` to be of the form " + f'"namespace::name", but got {qualname}. ' + f"The qualname passed to the torch.library APIs must consist " + f"of a namespace and a name, e.g. aten::sin" + ) + return splits[0], splits[1] + + +def lookup_op(qualname: str) -> torch._ops.OpOverload: + namespace, name = parse_namespace(qualname) + if "." in name: + name, overload = name.split(".") + else: + overload = "default" + ns = getattr(torch.ops, namespace) + packet = getattr(ns, name) + return getattr(packet, overload) + + +def is_builtin(op: torch._ops.OpOverload) -> bool: + assert isinstance(op, torch._ops.OpOverload) + return op.namespace in {"aten", "prim", "prims"} + + +def is_functional_schema(schema: Any) -> bool: + """Check if the schema is functional. 
# should be torch._C.JitType but that annotation is busted
def is_tensorlist_like_type(typ: Any) -> bool:
    """Return True if ``typ`` is any JIT-type spelling of a tensor list:
    Tensor[], Tensor?[], Tensor[]?, or Tensor?[]?."""
    tensor = _C.TensorType.get()
    candidates = (
        _C.ListType(tensor),                                    # Tensor[]
        _C.ListType(_C.OptionalType(tensor)),                   # Tensor?[]
        _C.OptionalType(_C.ListType(tensor)),                   # Tensor[]?
        _C.OptionalType(_C.ListType(_C.OptionalType(tensor))),  # Tensor?[]?
    )
    return any(typ == candidate for candidate in candidates)
def fill_defaults(schema, args, kwargs):
    """Normalize ``(args, kwargs)`` against ``schema`` by filling in defaults.

    Positional slots beyond ``len(args)`` and kwarg-only names absent from
    ``kwargs`` are populated from the schema's default values.

    Returns:
        ``(tuple_of_positionals, dict_of_kwarg_only)`` covering every schema arg.
    """
    positionals = []
    keywords = {}
    for index, info in enumerate(schema.arguments):
        if info.kwarg_only:
            # Kwarg-only: take the caller's value if present, else the default.
            keywords[info.name] = kwargs.get(info.name, info.default_value)
        elif index < len(args):
            positionals.append(args[index])
        else:
            # Caller omitted this positional; fall back to the schema default.
            positionals.append(info.default_value)
    return tuple(positionals), keywords
+ continue + yield info, args[i] + return + + +def can_generate_trivial_fake_impl(op: torch._ops.OpOverload) -> bool: + assert isinstance(op, torch._ops.OpOverload) + if is_builtin(op): + # We control the built-ins. These may (in rare cases) + # do input metadata mutation (which we have banned on custom ops) + return False + schema = op._schema + # It's suspicious if the op is not mutable but returns nothing, so we return False out of an abundance of caution + if not schema.is_mutable: + return False + if len(schema.returns) > 0: + return False + # If the op returns nothing, then it has a trivial fake impl. + return True + + +def requires_set_python_module() -> bool: + """If an op was defined in C++ and extended from Python using the + torch.library APIs, returns if we require that there have been a + m.set_python_module("mylib.ops") call from C++ that associates + the C++ op with a python module. + """ + return getattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE", True) + + +def handle_dispatch_mode(curr_mode, op_overload, *args, **kwargs): + assert isinstance(curr_mode, torch.utils._python_dispatch.TorchDispatchMode) + overload_types = [] + args_flattened, _ = torch.utils._pytree.tree_flatten((args, kwargs.values())) + for a in args_flattened: + # TODO: need to double check the semantics of the "types" argument to torch_dispatch. + # It's generated in PyInterpreter.cpp, but seems to be generated in two places, + # where in one case we only include tensors with the python key, and in another + # we include **all** tensors. + if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(a).has( + torch._C.DispatchKey.Python + ): + overload_types.append(type(a)) + # TODO: check that I got these args correct (in C++, we pass in "0000"??) 
+ + return curr_mode.__torch_dispatch__(op_overload, overload_types, args, kwargs) + + +def has_kwarg_only_args(schema: _C.FunctionSchema): + return any(a.kwarg_only for a in schema.arguments) + + +def has_kwarg_only_tensors(schema: _C.FunctionSchema): + for a in schema.arguments: + if not (is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type)): + continue + if not a.kwarg_only: + continue + return True + return False diff --git a/parrot/lib/python3.10/site-packages/torch/ao/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..32b1048ad35dbefc78a2654c7e7ca6ec38f60710 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/__init__.py @@ -0,0 +1,17 @@ +# mypy: allow-untyped-defs +# torch.ao is a package with a lot of interdependencies. +# We will use lazy import to avoid cyclic dependencies here. + + +__all__ = [ + "nn", + "ns", + "quantization", + "pruning", +] + +def __getattr__(name): + if name in __all__: + import importlib + return importlib.import_module("." 
class _Container(nn.Module):
    # Empty nn.Module used as an internal holder: BaseDataSparsifier registers
    # each named piece of data on it as a buffer and attaches the sparsity
    # parametrizations here, so mask state lives on this module rather than on
    # any user-owned model.
    pass
Only the keys that don't exist in the `config` will + be updated. + Example:: + >>> # xdoctest: +SKIP + >>> data_list = [('tensor_1', torch.randn(3,3)), ('tensor_2', torch.randn(4,4))] + >>> defaults = {'sparsity_level': 0.7} + >>> sparsifier = DerivedDataSparsifier(data_list = data_list, **defaults) # Some sparsifier that inherits BaseDataSparsifier + >>> new_tensor_to_add = {'name': 'tensor_3', 'data': torch.randn(5,5), 'sparsity_level': 0.3} + >>> sparsifier.add_data(**new_tensor_to_add) + >>> # tensor_1 and tensor_2 will have sparsity_level of 0.7 but tensor_3 will have sparsity_level=0.3 + """ + def __init__(self, data_list: Optional[List[Tuple[str, Any]]] = None, **defaults): + super().__init__(defaults=defaults) + + self._container = _Container() + + self.data_groups: Dict[str, Dict] = defaultdict(dict) # name -> {**config} + if data_list is not None: + # add data with default config here + [self.add_data(name, data, **self.defaults) for name, data in data_list] + + def prepare(self): + raise NotImplementedError("this function is undefined for this class") + + def _extract_weight(self, data): + # extract the weight parameter instead of underlying data + if type(data) in [torch.Tensor, nn.Parameter]: + return data + elif type(data) in EMBEDDING_TYPES: + return data.weight + + def add_data(self, name: str, data, reuse_mask=True, **config): + r""" Configures and parametrizes the internal container model with name and data. + + **Note**: + 1. If the data with name already exists, it replaces the data. + 2. While replacing, the old mask is reused when `reuse_mask=True` + 3. If `reuse_mask=True`, then the replacing data needs to have the same shape as that of old data. + 4. By default, the config of the replaced data is used as config for the replacing data, unless something + is specified in the config dictionary. 
+ """ + assert type(data) in SUPPORTED_TYPES, \ + "specified data type not supported at the moment" + local_args = copy.deepcopy(self.defaults) + local_args.update(config) + weight = self._extract_weight(data) + + # Bookkeeping in the container class + mask = local_args.get('mask', torch.ones_like(weight)) + param_class = local_args.get('parametrization', utils.FakeSparsity) + + if name in self.state: + # If the named data already exists - replace + warnings.warn("Replacing existing data of the same name. - Did you mean a different name?") + + # reuse old config + old_args = self.data_groups[name] + local_args = copy.deepcopy(old_args) + local_args.update(config) + + if reuse_mask: + current_data = self.get_data(name=name) + assert weight.shape == current_data.shape, \ + "to retain the old mask, the shape of the new data must be the same as the previous one" + mask = self.get_mask(name=name) # reuse mask instead of creating a new one + + self._delete_data(name=name) + + # parameter creates a deepcopy of the weight inside, so create a buffer + self._container.register_buffer(name=name, tensor=weight) + parametrize.register_parametrization(self._container, name, param_class(mask)) + self.state[name]['mask'] = mask + self.data_groups[name] = local_args + return getattr(self._container, name) + + def get_data(self, name: str, return_original: bool = True): + r"""Returns weight tensor (or data) + Args: + - name: name of the data to be returned + - return_original returns weight tensor without applying parametrization if True + else - returns the sparsified version (parametrized) + """ + if name not in self.data_groups: + raise ValueError("data with specified name does not exist") + + if return_original: + if not parametrize.is_parametrized(self._container, name): + raise ValueError("mask squashed - original mask value does not exist") + data = getattr(self._container.parametrizations, name).original + return data + else: + return getattr(self._container, name) + + def 
    def state_dict(self):
        r"""Returns the state of the sparsifier as a :class:`dict`.

        It contains:
        * state - contains name -> mask mapping.
        * data_groups - a list containing all sparsity configuration groups
            with the key name specifying the name of the data
        * container_state_dict - the state dictionary of the internal
            container model used for sparsification
        """
        # Masks are serialized in sparse COO form (the _convert_mask default)
        # to keep checkpoints compact; they are densified again on load.
        state = self._convert_mask(self.state)
        return {
            'state': state,
            'data_groups': self.data_groups,
            '_container': self._container.state_dict()
        }
+ """ + for name, state in states.items(): + config_name = data_groups.get(name, None) + if config_name is None: + raise RuntimeError(f"Error loading {name}") + + # check if the data with such a name was parametrized, if so parametrize + # otherwise just set the attribute and continue + parametrized_name = f'parametrizations.{name}.original' + parametrized = False + data = container_state_dict.get(name, None) + if name in container_state_dict: + # the parametrization was probably removed for this + data = container_state_dict.get(name) + + elif parametrized_name in container_state_dict: + # so the weight was parametrized + data = container_state_dict.get(parametrized_name) + parametrized = True + + else: + raise RuntimeError(f"Error loading {name}") + + self._container.register_buffer(name=name, tensor=data) + + if parametrized: + # register parameter if parametrized + mask = state.get('mask', torch.ones_like(data)) + param_class = data_groups.get('parametrization', utils.FakeSparsity) # change once public_api for utils is fixed! + parametrize.register_parametrization(self._container, name, param_class(mask)) + + def load_state_dict(self, state_dict, strict=True): + r"""The load_state_dict() restores the state of the sparsifier based on the state_dict + + Args: + * state_dict - the dictionary that to which the current sparsifier needs to be restored to + * strict - If True - the sparsifier is reset and is restored exactly to the state in state_dict. + If False - the current sparsifier is not reset before loading the state_dict i.e. data added + before loading the state_dict is not erased. 
+ """ + states = copy.deepcopy(state_dict['state']) + data_groups = copy.deepcopy(state_dict['data_groups']) + container_state_dict = copy.deepcopy(state_dict['_container']) + + states = self._convert_mask(states, sparse_coo=False) # convert sparse coo mask to dense + if strict: + # if strict load -> then reset container + self._container = _Container() + + self._load_container_from_state(states, data_groups, container_state_dict) + + if not strict: + states.update(self.state) + data_groups.update(self.data_groups) + + self.__setstate__({'state': states, 'data_groups': data_groups}) + + def __setstate__(self, state): + if '_container' in state: # If container object is in state then load model + container_dict = state.pop('_container') + self._container = _Container() + state['state'] = self._convert_mask(state['state'], sparse_coo=False) # convert sparse coo mask to dense + self._load_container_from_state(state['state'], state['data_groups'], container_dict) + + self.__dict__.update(state) + + def __getstate__(self): + state = self._convert_mask(self.state) + return { + 'defaults': self.defaults, + 'state': state, + 'data_groups': self.data_groups, + '_container': self._container.state_dict() + } + + def __repr__(self): + format_string = self.__class__.__name__ + ' (' + for name, sparse_args in self.data_groups.items(): + format_string += '\n' + format_string += '\tData Group\n' + format_string += f'\t name: {name}\n' + for key in sorted(sparse_args.keys()): + if key == 'data': + continue + format_string += f'\t {key}: {sparse_args[key]}\n' + format_string += ')' + return format_string + + def get_mask(self, name: str): + if name not in self.state: + raise ValueError("data with specified name does not exist") + return self.state[name]['mask'] + + def squash_mask(self, *args, leave_parametrized=True, names=None, **kwargs): + r"""Squashes the sparse masks into the appropriate tensors. Also, accepts list of strings + to squash mask for. 
If none, squashes mask for all the keys + kwargs: + * names: list of strings to squash mask for + * sparsified: if true - applies the mask before squashing + if false - does not apply the mask before squashing + """ + if names is None: + names = list(self.data_groups.keys()) + for name in names: + parametrize.remove_parametrizations(self._container, name, leave_parametrized=leave_parametrized) + + def step(self): + if not self.enable_mask_update: + return + with torch.no_grad(): + for name, config in self.data_groups.items(): + # get non-sparsified data + data = self.get_data(name) + # need name for the mask otherwise can directly pass mask? + self.update_mask(name, data, **config) + + @abc.abstractmethod + def update_mask(self, name, data, **kwargs): + pass + + def _delete_data(self, name): + """Detaches some data from the sparsifier. + + Args: + name (str) + Name of the data to be removed from the sparsifier + + Note: + Currently private. Kind of used as a helper function when replacing data of the same name + """ + self.squash_mask(names=[name], leave_parametrized=False) # do not apply the mask while deleting + delattr(self._container, name) + self.state.pop(name) + self.data_groups.pop(name) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_mappings.py b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..70a0c785190f21ad73cf20c330c87bf888df0114 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_mappings.py @@ -0,0 +1,19 @@ +# mypy: allow-untyped-defs +__all__ = [ + "get_static_sparse_quantized_mapping", + "get_dynamic_sparse_quantized_mapping", +] + +def get_static_sparse_quantized_mapping(): + import torch.ao.nn.sparse + _static_sparse_quantized_mapping = { + torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear, + } + return _static_sparse_quantized_mapping + +def get_dynamic_sparse_quantized_mapping(): + import torch.ao.nn.sparse + 
_dynamic_sparse_quantized_mapping = { + torch.nn.Linear: torch.ao.nn.sparse.quantized.dynamic.Linear, + } + return _dynamic_sparse_quantized_mapping diff --git a/parrot/lib/python3.10/site-packages/torch/func/__init__.py b/parrot/lib/python3.10/site-packages/torch/func/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0786456dec0f2346e56e76fbfa18b12acd49d5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/func/__init__.py @@ -0,0 +1,13 @@ +from torch._functorch.eager_transforms import ( + vjp, + jvp, + jacrev, + jacfwd, + hessian, + functionalize, + linearize +) +from torch._functorch.apis import grad, grad_and_value +from torch._functorch.functional_call import functional_call, stack_module_state +from torch._functorch.batch_norm_replacement import replace_all_batch_norm_modules_ +from torch._functorch.apis import vmap diff --git a/parrot/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 b/parrot/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 new file mode 100644 index 0000000000000000000000000000000000000000..c541a86da2232cdc4fceea88a602da2cfd5ded46 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9eb01021bb58ec36925daeb5a56606bfab558332eb5f5119e990e4c208ed2581 +size 169089 diff --git a/parrot/lib/python3.10/site-packages/torch/mps/__init__.py b/parrot/lib/python3.10/site-packages/torch/mps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0538ae50d1ad8658a47054c095e7efe0e7edc591 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/mps/__init__.py @@ -0,0 +1,148 @@ +# mypy: allow-untyped-defs +r""" +This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python. +Metal is Apple's API for programming metal GPU (graphics processor unit). 
Using MPS means that increased +performance can be achieved, by running work on the metal GPU(s). +See https://developer.apple.com/documentation/metalperformanceshaders for more details. +""" +from typing import Union + +import torch +from .. import Tensor + +_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False) +_default_mps_generator: torch._C.Generator = None # type: ignore[assignment] + + +# local helper function (not public or exported) +def _get_default_mps_generator() -> torch._C.Generator: + global _default_mps_generator + if _default_mps_generator is None: + _default_mps_generator = torch._C._mps_get_default_generator() + return _default_mps_generator + + +def device_count() -> int: + r"""Returns the number of available MPS devices.""" + return int(torch._C._has_mps and torch._C._mps_is_available()) + + +def synchronize() -> None: + r"""Waits for all kernels in all streams on a MPS device to complete.""" + return torch._C._mps_deviceSynchronize() + + +def get_rng_state(device: Union[int, str, torch.device] = "mps") -> Tensor: + r"""Returns the random number generator state as a ByteTensor. + + Args: + device (torch.device or int, optional): The device to return the RNG state of. + Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device). + """ + return _get_default_mps_generator().get_state() + + +def set_rng_state( + new_state: Tensor, device: Union[int, str, torch.device] = "mps" +) -> None: + r"""Sets the random number generator state. + + Args: + new_state (torch.ByteTensor): The desired state + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device). + """ + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + _get_default_mps_generator().set_state(new_state_copy) + + +def manual_seed(seed: int) -> None: + r"""Sets the seed for generating random numbers. + + Args: + seed (int): The desired seed. 
+ """ + # the torch.mps.manual_seed() can be called from the global + # torch.manual_seed() in torch/random.py. So we need to make + # sure mps is available (otherwise we just return without + # erroring out) + if not torch._C._has_mps: + return + seed = int(seed) + _get_default_mps_generator().manual_seed(seed) + + +def seed() -> None: + r"""Sets the seed for generating random numbers to a random number.""" + _get_default_mps_generator().seed() + + +def empty_cache() -> None: + r"""Releases all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU applications. + """ + torch._C._mps_emptyCache() + + +def set_per_process_memory_fraction(fraction) -> None: + r"""Set memory fraction for limiting process's memory allocation on MPS device. + The allowed value equals the fraction multiplied by recommended maximum device memory + (obtained from Metal API device.recommendedMaxWorkingSetSize). + If trying to allocate more than the allowed value in a process, it will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction. + + .. note:: + Passing 0 to fraction means unlimited allocations + (may cause system failure if out of memory). + Passing fraction greater than 1.0 allows limits beyond the value + returned from device.recommendedMaxWorkingSetSize. + """ + + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 2: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2") + + torch._C._mps_setMemoryFraction(fraction) + + +def current_allocated_memory() -> int: + r"""Returns the current GPU memory occupied by tensors in bytes. + + .. note:: + The returned size does not include cached allocations in + memory pools of MPSAllocator. 
+ """ + return torch._C._mps_currentAllocatedMemory() + + +def driver_allocated_memory() -> int: + r"""Returns total GPU memory allocated by Metal driver for the process in bytes. + + .. note:: + The returned size includes cached allocations in MPSAllocator pools + as well as allocations from MPS/MPSGraph frameworks. + """ + return torch._C._mps_driverAllocatedMemory() + + +from . import profiler +from .event import Event + +__all__ = [ + "device_count", + "get_rng_state", + "manual_seed", + "seed", + "set_rng_state", + "synchronize", + "empty_cache", + "set_per_process_memory_fraction", + "current_allocated_memory", + "driver_allocated_memory", + "Event", + "profiler", +] diff --git a/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73f5b11e39cd31ef38a309166f4e26f25679ded9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c58c5ccb7efc3b08b939a86769dcfd9200e5c70 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b78c5e779c107e90b2ac325421cb0b0018a4f311 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/mps/event.py 
b/parrot/lib/python3.10/site-packages/torch/mps/event.py new file mode 100644 index 0000000000000000000000000000000000000000..d619c027480c3ad6c52744afa76f35ff4cba64c0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/mps/event.py @@ -0,0 +1,46 @@ +# mypy: allow-untyped-defs +import torch + + +class Event: + r"""Wrapper around an MPS event. + + MPS events are synchronization markers that can be used to monitor the + device's progress, to accurately measure timing, and to synchronize MPS streams. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + """ + + def __init__(self, enable_timing=False): + self.__eventId = torch._C._mps_acquireEvent(enable_timing) + + def __del__(self): + # checks if torch._C is already destroyed + if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0: + torch._C._mps_releaseEvent(self.__eventId) + + def record(self): + r"""Records the event in the default stream.""" + torch._C._mps_recordEvent(self.__eventId) + + def wait(self): + r"""Makes all future work submitted to the default stream wait for this event.""" + torch._C._mps_waitForEvent(self.__eventId) + + def query(self): + r"""Returns True if all work currently captured by event has completed.""" + return torch._C._mps_queryEvent(self.__eventId) + + def synchronize(self): + r"""Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + """ + torch._C._mps_synchronizeEvent(self.__eventId) + + def elapsed_time(self, end_event): + r"""Returns the time elapsed in milliseconds after the event was + recorded and before the end_event was recorded. 
+ """ + return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId) diff --git a/parrot/lib/python3.10/site-packages/torch/mps/profiler.py b/parrot/lib/python3.10/site-packages/torch/mps/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ca3f55c5e6a0dc5243d210fc3557f43c85b2b5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/mps/profiler.py @@ -0,0 +1,60 @@ +# mypy: allow-untyped-defs +import contextlib + +import torch + +__all__ = ["start", "stop", "profile"] + + +def start(mode: str = "interval", wait_until_completed: bool = False) -> None: + r"""Start OS Signpost tracing from MPS backend. + + The generated OS Signposts could be recorded and viewed in + XCode Instruments Logging tool. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". + The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. _Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + mode_normalized = mode.lower().replace(" ", "") + torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed) + + +def stop(): + r"""Stops generating OS Signpost tracing from MPS backend.""" + torch._C._mps_profilerStopTrace() + + +@contextlib.contextmanager +def profile(mode: str = "interval", wait_until_completed: bool = False): + r"""Context Manager to enabling generating OS Signpost tracing from MPS backend. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". 
+ The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. _Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + try: + start(mode, wait_until_completed) + yield + finally: + stop() diff --git a/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c1056cfd225d1a752ff24970198ddfe8b1f7ef3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e7bbab1b588cda6beb9220bcce71b6410c8b46d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/xpu/_gpu_trace.py b/parrot/lib/python3.10/site-packages/torch/xpu/_gpu_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..0407abbf24959346ed78572e4be183c877d8a855 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/xpu/_gpu_trace.py @@ -0,0 +1,75 @@ +from typing import Callable + +from torch._utils import CallbackRegistry + + +EventCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU event creation" +) +EventDeletionCallbacks: 
"CallbackRegistry[int]" = CallbackRegistry( + "XPU event deletion" +) +EventRecordCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "XPU event record" +) +EventWaitCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "XPU event wait" +) +MemoryAllocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU memory allocation" +) +MemoryDeallocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU memory deallocation" +) +StreamCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU stream creation" +) +DeviceSynchronizationCallbacks: "CallbackRegistry[[]]" = CallbackRegistry( + "XPU device synchronization" +) +StreamSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU stream synchronization" +) +EventSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "XPU event synchronization" +) + + +def register_callback_for_event_creation(cb: Callable[[int], None]) -> None: + EventCreationCallbacks.add_callback(cb) + + +def register_callback_for_event_deletion(cb: Callable[[int], None]) -> None: + EventDeletionCallbacks.add_callback(cb) + + +def register_callback_for_event_record(cb: Callable[[int, int], None]) -> None: + EventRecordCallbacks.add_callback(cb) + + +def register_callback_for_event_wait(cb: Callable[[int, int], None]) -> None: + EventWaitCallbacks.add_callback(cb) + + +def register_callback_for_memory_allocation(cb: Callable[[int], None]) -> None: + MemoryAllocationCallbacks.add_callback(cb) + + +def register_callback_for_memory_deallocation(cb: Callable[[int], None]) -> None: + MemoryDeallocationCallbacks.add_callback(cb) + + +def register_callback_for_stream_creation(cb: Callable[[int], None]) -> None: + StreamCreationCallbacks.add_callback(cb) + + +def register_callback_for_device_synchronization(cb: Callable[[], None]) -> None: + DeviceSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_stream_synchronization(cb: Callable[[int], None]) -> 
None: + StreamSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_event_synchronization(cb: Callable[[int], None]) -> None: + EventSynchronizationCallbacks.add_callback(cb) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/candidate_sampling_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/candidate_sampling_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d4291b0aa3fca61c3b11229eb7587d5f427cf434 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/candidate_sampling_ops.py @@ -0,0 +1,513 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Wrappers for candidate sampling operations.""" + +from tensorflow.python.framework import random_seed +from tensorflow.python.ops import array_ops # pylint: disable=unused-import +from tensorflow.python.ops import gen_candidate_sampling_ops +from tensorflow.python.ops import math_ops # pylint: disable=unused-import +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export( + 'random.uniform_candidate_sampler', + v1=['random.uniform_candidate_sampler', 'nn.uniform_candidate_sampler']) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints('nn.uniform_candidate_sampler') +def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, + range_max, seed=None, name=None): + """Samples a set of classes using a uniform base distribution. + + This operation randomly samples a tensor of sampled classes + (`sampled_candidates`) from the range of integers `[0, range_max)`. + + See the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf) + for a quick course on Candidate Sampling. + + The elements of `sampled_candidates` are drawn without replacement + (if `unique=True`) or with replacement (if `unique=False`) from + the base distribution. + + The base distribution for this operation is the uniform distribution + over the range of integers `[0, range_max)`. + + In addition, this operation returns tensors `true_expected_count` + and `sampled_expected_count` representing the number of times each + of the target classes (`true_classes`) and the sampled + classes (`sampled_candidates`) is expected to occur in an average + tensor of sampled classes. These values correspond to `Q(y|x)` + defined in the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). 
+ If `unique=True`, then these are post-rejection probabilities and we + compute them approximately. + + Note that this function (and also other `*_candidate_sampler` + functions) only gives you the ingredients to implement the various + Candidate Sampling algorithms listed in the big table in the + [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). You + still need to implement the algorithms yourself. + + For example, according to that table, the phrase "negative samples" + may mean different things in different algorithms. For instance, in + NCE, "negative samples" means `S_i` (which is just the sampled + classes) which may overlap with true classes, while in Sampled + Logistic, "negative samples" means `S_i - T_i` which excludes the + true classes. The return value `sampled_candidates` corresponds to + `S_i`, not to any specific definition of "negative samples" in any + specific algorithm. It's your responsibility to pick an algorithm + and calculate the "negative samples" defined by that algorithm + (e.g. `S_i - T_i`). + + As another example, the `true_classes` argument is for calculating + the `true_expected_count` output (as a by-product of this function's + main calculation), which may be needed by some algorithms (according + to that table). It's not for excluding true classes in the return + value `sampled_candidates`. Again that step is algorithm-specific + and should be carried out by you. + + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. + num_true: An `int`. The number of target classes per training example. + num_sampled: An `int`. The number of classes to randomly sample. The + `sampled_candidates` return value will have shape `[num_sampled]`. If + `unique=True`, `num_sampled` must be less than or equal to `range_max`. + unique: A `bool`. Determines whether all sampled classes in a batch are + unique. + range_max: An `int`. 
The number of possible classes. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). + + Returns: + sampled_candidates: A tensor of type `int64` and shape + `[num_sampled]`. The sampled classes, either with possible + duplicates (`unique=False`) or all unique (`unique=True`). As + noted above, `sampled_candidates` may overlap with true classes. + true_expected_count: A tensor of type `float`. Same shape as + `true_classes`. The expected counts under the sampling distribution + of each of `true_classes`. + sampled_expected_count: A tensor of type `float`. Same shape as + `sampled_candidates`. The expected counts under the sampling distribution + of each of `sampled_candidates`. + """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_candidate_sampling_ops.uniform_candidate_sampler( + true_classes, num_true, num_sampled, unique, range_max, seed=seed1, + seed2=seed2, name=name) + + +@tf_export( + 'random.log_uniform_candidate_sampler', + v1=[ + 'random.log_uniform_candidate_sampler', + 'nn.log_uniform_candidate_sampler' + ]) +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints('nn.log_uniform_candidate_sampler') +def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, + range_max, seed=None, name=None): + """Samples a set of classes using a log-uniform (Zipfian) base distribution. + + This operation randomly samples a tensor of sampled classes + (`sampled_candidates`) from the range of integers `[0, range_max)`. + + See the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf) + for a quick course on Candidate Sampling. + + The elements of `sampled_candidates` are drawn without replacement + (if `unique=True`) or with replacement (if `unique=False`) from + the base distribution. 
+ + The base distribution for this operation is an approximately log-uniform + or Zipfian distribution: + + `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` + + This sampler is useful when the target classes approximately follow such + a distribution - for example, if the classes represent words in a lexicon + sorted in decreasing order of frequency. If your classes are not ordered by + decreasing frequency, do not use this op. + + In addition, this operation returns tensors `true_expected_count` + and `sampled_expected_count` representing the number of times each + of the target classes (`true_classes`) and the sampled + classes (`sampled_candidates`) is expected to occur in an average + tensor of sampled classes. These values correspond to `Q(y|x)` + defined in the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). + If `unique=True`, then these are post-rejection probabilities and we + compute them approximately. + + Note that this function (and also other `*_candidate_sampler` + functions) only gives you the ingredients to implement the various + Candidate Sampling algorithms listed in the big table in the + [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). You + still need to implement the algorithms yourself. + + For example, according to that table, the phrase "negative samples" + may mean different things in different algorithms. For instance, in + NCE, "negative samples" means `S_i` (which is just the sampled + classes) which may overlap with true classes, while in Sampled + Logistic, "negative samples" means `S_i - T_i` which excludes the + true classes. The return value `sampled_candidates` corresponds to + `S_i`, not to any specific definition of "negative samples" in any + specific algorithm. It's your responsibility to pick an algorithm + and calculate the "negative samples" defined by that algorithm + (e.g. `S_i - T_i`). 
+ + As another example, the `true_classes` argument is for calculating + the `true_expected_count` output (as a by-product of this function's + main calculation), which may be needed by some algorithms (according + to that table). It's not for excluding true classes in the return + value `sampled_candidates`. Again that step is algorithm-specific + and should be carried out by you. + + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. + num_true: An `int`. The number of target classes per training example. + num_sampled: An `int`. The number of classes to randomly sample. + unique: A `bool`. Determines whether all sampled classes in a batch are + unique. + range_max: An `int`. The number of possible classes. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). + + Returns: + sampled_candidates: A tensor of type `int64` and shape + `[num_sampled]`. The sampled classes. As noted above, + `sampled_candidates` may overlap with true classes. + true_expected_count: A tensor of type `float`. Same shape as + `true_classes`. The expected counts under the sampling distribution + of each of `true_classes`. + sampled_expected_count: A tensor of type `float`. Same shape as + `sampled_candidates`. The expected counts under the sampling distribution + of each of `sampled_candidates`. 
+ """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_candidate_sampling_ops.log_uniform_candidate_sampler( + true_classes, num_true, num_sampled, unique, range_max, seed=seed1, + seed2=seed2, name=name) + + +@tf_export( + 'random.learned_unigram_candidate_sampler', + 'nn.learned_unigram_candidate_sampler') +@dispatch.add_dispatch_support +@deprecation.deprecated_endpoints(['nn.learned_unigram_candidate_sampler']) +def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, + unique, range_max, seed=None, name=None): + """Samples a set of classes from a distribution learned during training. + + This operation randomly samples a tensor of sampled classes + (`sampled_candidates`) from the range of integers `[0, range_max)`. + + See the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf) + for a quick course on Candidate Sampling. + + The elements of `sampled_candidates` are drawn without replacement + (if `unique=True`) or with replacement (if `unique=False`) from + the base distribution. + + The base distribution for this operation is constructed on the fly + during training. It is a unigram distribution over the target + classes seen so far during training. Every integer in `[0, range_max)` + begins with a weight of 1, and is incremented by 1 each time it is + seen as a target class. The base distribution is not saved to checkpoints, + so it is reset when the model is reloaded. + + In addition, this operation returns tensors `true_expected_count` + and `sampled_expected_count` representing the number of times each + of the target classes (`true_classes`) and the sampled + classes (`sampled_candidates`) is expected to occur in an average + tensor of sampled classes. These values correspond to `Q(y|x)` + defined in the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). 
+ If `unique=True`, then these are post-rejection probabilities and we + compute them approximately. + + Note that this function (and also other `*_candidate_sampler` + functions) only gives you the ingredients to implement the various + Candidate Sampling algorithms listed in the big table in the + [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). You + still need to implement the algorithms yourself. + + For example, according to that table, the phrase "negative samples" + may mean different things in different algorithms. For instance, in + NCE, "negative samples" means `S_i` (which is just the sampled + classes) which may overlap with true classes, while in Sampled + Logistic, "negative samples" means `S_i - T_i` which excludes the + true classes. The return value `sampled_candidates` corresponds to + `S_i`, not to any specific definition of "negative samples" in any + specific algorithm. It's your responsibility to pick an algorithm + and calculate the "negative samples" defined by that algorithm + (e.g. `S_i - T_i`). + + As another example, the `true_classes` argument is for calculating + the `true_expected_count` output (as a by-product of this function's + main calculation), which may be needed by some algorithms (according + to that table). It's not for excluding true classes in the return + value `sampled_candidates`. Again that step is algorithm-specific + and should be carried out by you. + + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. + num_true: An `int`. The number of target classes per training example. + num_sampled: An `int`. The number of classes to randomly sample. + unique: A `bool`. Determines whether all sampled classes in a batch are + unique. + range_max: An `int`. The number of possible classes. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). 
+ + Returns: + sampled_candidates: A tensor of type `int64` and shape + `[num_sampled]`. The sampled classes. As noted above, + `sampled_candidates` may overlap with true classes. + true_expected_count: A tensor of type `float`. Same shape as + `true_classes`. The expected counts under the sampling distribution + of each of `true_classes`. + sampled_expected_count: A tensor of type `float`. Same shape as + `sampled_candidates`. The expected counts under the sampling distribution + of each of `sampled_candidates`. + + """ + seed1, seed2 = random_seed.get_seed(seed) + # Limiting to Max int32 value + if range_max > 2147483647: + raise ValueError(f'Value of range_max:{range_max} is too large to handle') + return gen_candidate_sampling_ops.learned_unigram_candidate_sampler( + true_classes, num_true, num_sampled, unique, range_max, seed=seed1, + seed2=seed2, name=name) + + +@tf_export('random.fixed_unigram_candidate_sampler', + 'nn.fixed_unigram_candidate_sampler') +@dispatch.add_dispatch_support +def fixed_unigram_candidate_sampler(true_classes, + num_true, + num_sampled, + unique, + range_max, + vocab_file='', + distortion=1.0, + num_reserved_ids=0, + num_shards=1, + shard=0, + unigrams=(), + seed=None, + name=None): + """Samples a set of classes using the provided (fixed) base distribution. + + This operation randomly samples a tensor of sampled classes + (`sampled_candidates`) from the range of integers `[0, range_max)`. + + See the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf) + for a quick course on Candidate Sampling. + + The elements of `sampled_candidates` are drawn without replacement + (if `unique=True`) or with replacement (if `unique=False`) from + the base distribution. + + The base distribution is read from a file or passed in as an + in-memory array. There is also an option to skew the distribution by + applying a distortion power to the weights. 
+ + In addition, this operation returns tensors `true_expected_count` + and `sampled_expected_count` representing the number of times each + of the target classes (`true_classes`) and the sampled + classes (`sampled_candidates`) is expected to occur in an average + tensor of sampled classes. These values correspond to `Q(y|x)` + defined in the [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). + If `unique=True`, then these are post-rejection probabilities and we + compute them approximately. + + Note that this function (and also other `*_candidate_sampler` + functions) only gives you the ingredients to implement the various + Candidate Sampling algorithms listed in the big table in the + [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). You + still need to implement the algorithms yourself. + + For example, according to that table, the phrase "negative samples" + may mean different things in different algorithms. For instance, in + NCE, "negative samples" means `S_i` (which is just the sampled + classes) which may overlap with true classes, while in Sampled + Logistic, "negative samples" means `S_i - T_i` which excludes the + true classes. The return value `sampled_candidates` corresponds to + `S_i`, not to any specific definition of "negative samples" in any + specific algorithm. It's your responsibility to pick an algorithm + and calculate the "negative samples" defined by that algorithm + (e.g. `S_i - T_i`). + + As another example, the `true_classes` argument is for calculating + the `true_expected_count` output (as a by-product of this function's + main calculation), which may be needed by some algorithms (according + to that table). It's not for excluding true classes in the return + value `sampled_candidates`. Again that step is algorithm-specific + and should be carried out by you. 
+ + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. + num_true: An `int`. The number of target classes per training example. + num_sampled: An `int`. The number of classes to randomly sample. + unique: A `bool`. Determines whether all sampled classes in a batch are + unique. + range_max: An `int`. The number of possible classes. + vocab_file: Each valid line in this file (which should have a CSV-like + format) corresponds to a valid word ID. IDs are in sequential order, + starting from num_reserved_ids. The last entry in each line is expected + to be a value corresponding to the count or relative probability. Exactly + one of `vocab_file` and `unigrams` needs to be passed to this operation. + distortion: The distortion is used to skew the unigram probability + distribution. Each weight is first raised to the distortion's power + before adding to the internal unigram distribution. As a result, + `distortion = 1.0` gives regular unigram sampling (as defined by the vocab + file), and `distortion = 0.0` gives a uniform distribution. + num_reserved_ids: Optionally some reserved IDs can be added in the range + `[0, num_reserved_ids)` by the users. One use case is that a special + unknown word token is used as ID 0. These IDs will have a sampling + probability of 0. + num_shards: A sampler can be used to sample from a subset of the original + range in order to speed up the whole computation through parallelism. This + parameter (together with `shard`) indicates the number of partitions that + are being used in the overall computation. + shard: A sampler can be used to sample from a subset of the original range + in order to speed up the whole computation through parallelism. This + parameter (together with `num_shards`) indicates the particular partition + number of the operation, when partitioning is being used. + unigrams: A list of unigram counts or probabilities, one per ID in + sequential order. 
Exactly one of `vocab_file` and `unigrams` should be + passed to this operation. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). + + Returns: + sampled_candidates: A tensor of type `int64` and shape + `[num_sampled]`. The sampled classes. As noted above, + `sampled_candidates` may overlap with true classes. + true_expected_count: A tensor of type `float`. Same shape as + `true_classes`. The expected counts under the sampling distribution + of each of `true_classes`. + sampled_expected_count: A tensor of type `float`. Same shape as + `sampled_candidates`. The expected counts under the sampling distribution + of each of `sampled_candidates`. + + """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_candidate_sampling_ops.fixed_unigram_candidate_sampler( + true_classes, num_true, num_sampled, unique, range_max, + vocab_file=vocab_file, distortion=distortion, + num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, + unigrams=unigrams, seed=seed1, seed2=seed2, name=name) + + +@tf_export('random.all_candidate_sampler', 'nn.all_candidate_sampler') +def all_candidate_sampler(true_classes, num_true, num_sampled, unique, + seed=None, name=None): + """Generate the set of all classes. + + Deterministically generates and returns the set of all possible classes. + For testing purposes. There is no need to use this, since you might as + well use full softmax or full logistic regression. + + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. + num_true: An `int`. The number of target classes per training example. + num_sampled: An `int`. The number of possible classes. + unique: A `bool`. Ignored. + unique. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). + + Returns: + sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. 
+ This operation deterministically returns the entire range + `[0, num_sampled]`. + true_expected_count: A tensor of type `float`. Same shape as + `true_classes`. The expected counts under the sampling distribution + of each of `true_classes`. All returned values are 1.0. + sampled_expected_count: A tensor of type `float`. Same shape as + `sampled_candidates`. The expected counts under the sampling distribution + of each of `sampled_candidates`. All returned values are 1.0. + """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_candidate_sampling_ops.all_candidate_sampler( + true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2, + name=name) + + +@tf_export('nn.compute_accidental_hits') +@dispatch.add_dispatch_support +def compute_accidental_hits(true_classes, sampled_candidates, num_true, + seed=None, name=None): + """Compute the position ids in `sampled_candidates` matching `true_classes`. + + In Candidate Sampling, this operation facilitates virtually removing + sampled classes which happen to match target classes. This is done + in Sampled Softmax and Sampled Logistic. + + See our [Candidate Sampling Algorithms + Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). + + We presuppose that the `sampled_candidates` are unique. + + We call it an 'accidental hit' when one of the target classes + matches one of the sampled classes. This operation reports + accidental hits as triples `(index, id, weight)`, where `index` + represents the row number in `true_classes`, `id` represents the + position in `sampled_candidates`, and weight is `-FLOAT_MAX`. + + The result of this op should be passed through a `sparse_to_dense` + operation, then added to the logits of the sampled classes. This + removes the contradictory effect of accidentally sampling the true + target classes as noise classes for the same example. + + Args: + true_classes: A `Tensor` of type `int64` and shape `[batch_size, + num_true]`. The target classes. 
+ sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. + The sampled_candidates output of CandidateSampler. + num_true: An `int`. The number of target classes per training example. + seed: An `int`. An operation-specific seed. Default is 0. + name: A name for the operation (optional). + + Returns: + indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`. + Values indicate rows in `true_classes`. + ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`. + Values indicate positions in `sampled_candidates`. + weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`. + Each value is `-FLOAT_MAX`. + + """ + seed1, seed2 = random_seed.get_seed(seed) + return gen_candidate_sampling_ops.compute_accidental_hits( + true_classes, sampled_candidates, num_true, seed=seed1, seed2=seed2, + name=name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_case.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_case.py new file mode 100644 index 0000000000000000000000000000000000000000..be7beca29fe10a50e2cbc4e549cd27a80b64e0ce --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_case.py @@ -0,0 +1,417 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Case functions for Control Flow Operations.""" + +import collections +import functools +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("case", v1=[]) +@dispatch.add_dispatch_support +def case_v2(pred_fn_pairs, + default=None, + exclusive=False, + strict=False, + name="case"): + """Create a case operation. + + See also `tf.switch_case`. + + The `pred_fn_pairs` parameter is a list of pairs of size N. + Each pair contains a boolean scalar tensor and a python callable that + creates the tensors to be returned if the boolean evaluates to True. + `default` is a callable generating a list of tensors. All the callables + in `pred_fn_pairs` as well as `default` (if provided) should return the same + number and types of tensors. + + If `exclusive==True`, all predicates are evaluated, and an exception is + thrown if more than one of the predicates evaluates to `True`. + If `exclusive==False`, execution stops at the first predicate which + evaluates to True, and the tensors generated by the corresponding function + are returned immediately. If none of the predicates evaluate to True, this + operation returns the tensors generated by `default`. + + `tf.case` supports nested structures as implemented in + `tf.nest`. All of the callables must return the same (possibly nested) value + structure of lists, tuples, and/or named tuples. 
Singleton lists and tuples + form the only exceptions to this: when returned by a callable, they are + implicitly unpacked to single values. This behavior is disabled by passing + `strict=True`. + + @compatibility(v2) + `pred_fn_pairs` could be a dictionary in v1. However, tf.Tensor and + tf.Variable are no longer hashable in v2, so cannot be used as a key for a + dictionary. Please use a list or a tuple instead. + @end_compatibility + + + **Example 1:** + + Pseudocode: + + ``` + if (x < y) return 17; + else return 23; + ``` + + Expressions: + + ```python + f1 = lambda: tf.constant(17) + f2 = lambda: tf.constant(23) + r = tf.case([(tf.less(x, y), f1)], default=f2) + ``` + + **Example 2:** + + Pseudocode: + + ``` + if (x < y && x > z) raise OpError("Only one predicate may evaluate to True"); + if (x < y) return 17; + else if (x > z) return 23; + else return -1; + ``` + + Expressions: + + ```python + def f1(): return tf.constant(17) + def f2(): return tf.constant(23) + def f3(): return tf.constant(-1) + r = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)], + default=f3, exclusive=True) + ``` + + Args: + pred_fn_pairs: List of pairs of a boolean scalar tensor and a callable which + returns a list of tensors. + default: Optional callable that returns a list of tensors. + exclusive: True iff at most one predicate is allowed to evaluate to `True`. + strict: A boolean that enables/disables 'strict' mode; see above. + name: A name for this operation (optional). + + Returns: + The tensors returned by the first pair whose predicate evaluated to True, or + those returned by `default` if none does. + + Raises: + TypeError: If `pred_fn_pairs` is not a list/tuple. + TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. 
+ """ + return _case_helper( + cond.cond, + pred_fn_pairs, + default, + exclusive, + name, + allow_python_preds=False, + strict=strict) + + +@tf_export(v1=["case"]) +@dispatch.add_dispatch_support +def case(pred_fn_pairs, + default=None, + exclusive=False, + strict=False, + name="case"): + """Create a case operation. + + See also `tf.switch_case`. + + The `pred_fn_pairs` parameter is a dict or list of pairs of size N. + Each pair contains a boolean scalar tensor and a python callable that + creates the tensors to be returned if the boolean evaluates to True. + `default` is a callable generating a list of tensors. All the callables + in `pred_fn_pairs` as well as `default` (if provided) should return the same + number and types of tensors. + + If `exclusive==True`, all predicates are evaluated, and an exception is + thrown if more than one of the predicates evaluates to `True`. + If `exclusive==False`, execution stops at the first predicate which + evaluates to True, and the tensors generated by the corresponding function + are returned immediately. If none of the predicates evaluate to True, this + operation returns the tensors generated by `default`. + + `tf.case` supports nested structures as implemented in + `tf.nest`. All of the callables must return the same (possibly nested) value + structure of lists, tuples, and/or named tuples. Singleton lists and tuples + form the only exceptions to this: when returned by a callable, they are + implicitly unpacked to single values. This behavior is disabled by passing + `strict=True`. + + If an unordered dictionary is used for `pred_fn_pairs`, the order of the + conditional tests is not guaranteed. However, the order is guaranteed to be + deterministic, so that variables created in conditional branches are created + in fixed order across runs. + + @compatibility(eager) + Unordered dictionaries are not supported in eager mode when `exclusive=False`. + Use a list of tuples instead. 
+ @end_compatibility + + + **Example 1:** + + Pseudocode: + + ``` + if (x < y) return 17; + else return 23; + ``` + + Expressions: + + ```python + f1 = lambda: tf.constant(17) + f2 = lambda: tf.constant(23) + r = tf.case([(tf.less(x, y), f1)], default=f2) + ``` + + **Example 2:** + + Pseudocode: + + ``` + if (x < y && x > z) raise OpError("Only one predicate may evaluate to True"); + if (x < y) return 17; + else if (x > z) return 23; + else return -1; + ``` + + Expressions: + + ```python + def f1(): return tf.constant(17) + def f2(): return tf.constant(23) + def f3(): return tf.constant(-1) + r = tf.case({tf.less(x, y): f1, tf.greater(x, z): f2}, + default=f3, exclusive=True) + ``` + + Args: + pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a + callable which returns a list of tensors. + default: Optional callable that returns a list of tensors. + exclusive: True iff at most one predicate is allowed to evaluate to `True`. + strict: A boolean that enables/disables 'strict' mode; see above. + name: A name for this operation (optional). + + Returns: + The tensors returned by the first pair whose predicate evaluated to True, or + those returned by `default` if none does. + + Raises: + TypeError: If `pred_fn_pairs` is not a list/dictionary. + TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. + """ + return _case_helper( + cond.cond, + pred_fn_pairs, + default, + exclusive, + name, + allow_python_preds=False, + strict=strict) + + +def _assert_at_most_n_true(predicates, n, msg): + """Returns an Assert op that checks that at most n predicates are True. + + Args: + predicates: list of bool scalar tensors. + n: maximum number of true predicates allowed. + msg: Error message. 
+ """ + preds_c = array_ops_stack.stack(predicates, name="preds_c") + num_true_conditions = math_ops.reduce_sum( + math_ops.cast(preds_c, dtypes.int32), name="num_true_conds") + condition = math_ops.less_equal(num_true_conditions, + constant_op.constant(n, name="n_true_conds")) + preds_names = ", ".join(getattr(p, "name", "?") for p in predicates) + error_msg = [ + "%s: more than %d conditions (%s) evaluated as True:" % + (msg, n, preds_names), preds_c + ] + return control_flow_assert.Assert( + condition, data=error_msg, summarize=len(predicates)) + + +def _case_create_default_action(predicates, actions): + """Creates default action for a list of actions and their predicates. + + It uses the input actions to select an arbitrary as default and makes sure + that corresponding predicates have valid values. + + Args: + predicates: a list of bool scalar tensors + actions: a list of callable objects which return tensors. + + Returns: + a callable + """ + k = len(predicates) - 1 # could pick any + predicate, action = predicates[k], actions[k] + other_predicates, other_actions = predicates[:k], actions[:k] + + def default_action(): + others_msg = ("Implementation error: " + "selected default action #%d was called, but some of other " + "predicates are True: " % k) + default_msg = ("Input error: " + "None of conditions evaluated as True:", + array_ops_stack.stack(predicates, name="preds_c")) + with ops.control_dependencies([ + _assert_at_most_n_true( # pylint: disable=protected-access + other_predicates, n=0, msg=others_msg), + control_flow_assert.Assert(predicate, data=default_msg) + ]): + return action() + + return default_action, other_predicates, other_actions + + +def _case_helper(cond_fn, + pred_fn_pairs, + default, + exclusive, + name, + allow_python_preds=False, + **cond_kwargs): + """Implementation of case that allows for different cond functions. + + Args: + cond_fn: method that has signature and semantics of `cond` above. 
+ pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a + callable which returns a list of tensors. + default: Optional callable that returns a list of tensors. + exclusive: True iff at most one predicate is allowed to evaluate to `True`. + name: A name for this operation (optional). + allow_python_preds: if true, pred_fn_pairs may contain Python bools in + addition to boolean Tensors + **cond_kwargs: keyword arguments that will be passed to `cond_fn`. + + Returns: + The tensors returned by the first pair whose predicate evaluated to True, or + those returned by `default` if none does. + + Raises: + TypeError: If `pred_fn_pairs` is not a list/dictionary. + TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. + """ + predicates, actions = _case_verify_and_canonicalize_args( + pred_fn_pairs, exclusive, name, allow_python_preds) + with ops.name_scope(name, "case", [predicates]): + if default is None: + default, predicates, actions = _case_create_default_action( + predicates, actions) + fn = default + # To eval conditions in direct order we create nested conditions in reverse: + # cond_fn(c[0], true_fn=.., false_fn=cond_fn(c[1], ...)) + for predicate, action in reversed(list(zip(predicates, actions))): + fn = functools.partial( + cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs) + if exclusive: + with ops.control_dependencies([ + _assert_at_most_n_true( # pylint: disable=protected-access + predicates, n=1, msg="Input error: exclusive=True") + ]): + return fn() + else: + return fn() + + +def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, + allow_python_preds): + """Verifies input arguments for the case function. + + Args: + pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a + callable which returns a list of tensors. + exclusive: True iff at most one predicate is allowed to evaluate to `True`. 
+ name: A name for the case operation. + allow_python_preds: if true, pred_fn_pairs may contain Python bools in + addition to boolean Tensors + + Raises: + TypeError: If `pred_fn_pairs` is not a list/dictionary. + TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. + TypeError: If `fns[i]` is not callable for any i, or `default` is not + callable. + + Returns: + a tuple . + """ + if not isinstance(pred_fn_pairs, (list, tuple, dict)): + raise TypeError("'pred_fn_pairs' must be a list, tuple, or dict. " + f"Received: {type(pred_fn_pairs)}") + + if isinstance(pred_fn_pairs, collections.OrderedDict): + pred_fn_pairs = pred_fn_pairs.items() + elif isinstance(pred_fn_pairs, dict): + if context.executing_eagerly(): + # No name to sort on in eager mode. Use dictionary traversal order, + # which is nondeterministic in versions of Python < 3.6 + if not exclusive: + raise ValueError("Unordered dictionaries are not supported for the " + "'pred_fn_pairs' argument when `exclusive=False` and " + "eager mode is enabled.") + pred_fn_pairs = list(pred_fn_pairs.items()) + else: + pred_fn_pairs = sorted( + pred_fn_pairs.items(), key=lambda item: item[0].name) + if not exclusive: + logging.warn( + "%s: An unordered dictionary of predicate/fn pairs was " + "provided, but exclusive=False. The order of conditional " + "tests is deterministic but not guaranteed.", name) + for pred_fn_pair in pred_fn_pairs: + if not isinstance(pred_fn_pair, tuple) or len(pred_fn_pair) != 2: + raise TypeError("Each entry in 'pred_fn_pairs' must be a 2-tuple. 
" + f"Received {pred_fn_pair}.") + pred, fn = pred_fn_pair + + if isinstance(pred, tensor.Tensor): + if pred.dtype != dtypes.bool: + raise TypeError("pred must be Tensor of type bool: %s" % pred.name) + elif not allow_python_preds: + raise TypeError("pred must be a Tensor, got: %s" % pred) + elif not isinstance(pred, bool): + raise TypeError("pred must be a Tensor or bool, got: %s" % pred) + + if not callable(fn): + raise TypeError("fn for pred %s must be callable." % pred.name) + + predicates, actions = zip(*pred_fn_pairs) + return predicates, actions diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_audio_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_audio_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..dd5a29489dc41e616010846d36a9772f7b6f4d73 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_audio_ops.py @@ -0,0 +1,472 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def audio_spectrogram(input: Annotated[Any, _atypes.Float32], window_size: int, stride: int, magnitude_squared:bool=False, name=None) -> Annotated[Any, _atypes.Float32]: + r"""Produces a visualization of audio data over time. + + Spectrograms are a standard way of representing audio information as a series of + slices of frequency information, one slice for each window of time. By joining + these together into a sequence, they form a distinctive fingerprint of the sound + over time. + + This op expects to receive audio data as an input, stored as floats in the range + -1 to 1, together with a window width in samples, and a stride specifying how + far to move the window between slices. From this it generates a three + dimensional output. The first dimension is for the channels in the input, so a + stereo audio input would have two here for example. The second dimension is time, + with successive frequency slices. The third dimension has an amplitude value for + each frequency during that time slice. + + This means the layout when converted and saved as an image is rotated 90 degrees + clockwise from a typical spectrogram. 
Time is descending down the Y axis, and + the frequency decreases from left to right. + + Each value in the result represents the square root of the sum of the real and + imaginary parts of an FFT on the current window of samples. In this way, the + lowest dimension represents the power of each frequency in the current window, + and adjacent windows are concatenated in the next dimension. + + To get a more intuitive and visual look at what this operation does, you can run + tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the + resulting spectrogram as a PNG image. + + Args: + input: A `Tensor` of type `float32`. Float representation of audio data. + window_size: An `int`. + How wide the input window is in samples. For the highest efficiency + this should be a power of two, but other values are accepted. + stride: An `int`. + How widely apart the center of adjacent sample windows should be. + magnitude_squared: An optional `bool`. Defaults to `False`. + Whether to return the squared magnitude or just the + magnitude. Using squared magnitude can avoid extra calculations. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AudioSpectrogram", name, input, "window_size", window_size, + "stride", stride, "magnitude_squared", magnitude_squared) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return audio_spectrogram_eager_fallback( + input, window_size=window_size, stride=stride, + magnitude_squared=magnitude_squared, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ window_size = _execute.make_int(window_size, "window_size") + stride = _execute.make_int(stride, "stride") + if magnitude_squared is None: + magnitude_squared = False + magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AudioSpectrogram", input=input, window_size=window_size, + stride=stride, + magnitude_squared=magnitude_squared, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("window_size", _op._get_attr_int("window_size"), "stride", + _op._get_attr_int("stride"), "magnitude_squared", + _op._get_attr_bool("magnitude_squared")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AudioSpectrogram", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AudioSpectrogram = tf_export("raw_ops.AudioSpectrogram")(_ops.to_raw_op(audio_spectrogram)) + + +def audio_spectrogram_eager_fallback(input: Annotated[Any, _atypes.Float32], window_size: int, stride: int, magnitude_squared: bool, name, ctx) -> Annotated[Any, _atypes.Float32]: + window_size = _execute.make_int(window_size, "window_size") + stride = _execute.make_int(stride, "stride") + if magnitude_squared is None: + magnitude_squared = False + magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared") + input = _ops.convert_to_tensor(input, _dtypes.float32) + _inputs_flat = [input] + _attrs = ("window_size", window_size, "stride", stride, "magnitude_squared", + magnitude_squared) + _result = _execute.execute(b"AudioSpectrogram", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AudioSpectrogram", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_DecodeWavOutput = collections.namedtuple( + "DecodeWav", + ["audio", "sample_rate"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('audio.decode_wav') +def 
decode_wav(contents: Annotated[Any, _atypes.String], desired_channels:int=-1, desired_samples:int=-1, name=None):
  r"""Decode a 16-bit PCM WAV file to a float tensor.

  The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.

  When desired_channels is set, if the input contains fewer channels than this
  then the last channel will be duplicated to give the requested number, else if
  the input has more channels than requested then the additional channels will be
  ignored.

  If desired_samples is set, then the audio will be cropped or padded with zeroes
  to the requested length.

  The first output contains a Tensor with the content of the audio samples. The
  lowest dimension will be the number of channels, and the second will be the
  number of samples. For example, a ten-sample-long stereo WAV file should give an
  output shape of [10, 2].

  Args:
    contents: A `Tensor` of type `string`.
      The WAV-encoded audio, usually from a file.
    desired_channels: An optional `int`. Defaults to `-1`.
      Number of sample channels wanted.
    desired_samples: An optional `int`. Defaults to `-1`.
      Length of audio requested.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (audio, sample_rate).

    audio: A `Tensor` of type `float32`.
    sample_rate: A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: first try the C fast path, then the type-based dispatcher,
    # then the Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DecodeWav", name, contents, "desired_channels",
        desired_channels, "desired_samples", desired_samples)
      _result = _DecodeWavOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_decode_wav(
          (contents, desired_channels, desired_samples, name,), None)
      if _result is not NotImplemented:
        return _result
      return decode_wav_eager_fallback(
          contents, desired_channels=desired_channels,
          desired_samples=desired_samples, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list (e.g. for overridden operator behavior).
      _result = _dispatch.dispatch(
            decode_wav, (), dict(contents=contents,
                                 desired_channels=desired_channels,
                                 desired_samples=desired_samples, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_decode_wav(
        (contents, desired_channels, desired_samples, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if desired_channels is None:
    desired_channels = -1
  desired_channels = _execute.make_int(desired_channels, "desired_channels")
  if desired_samples is None:
    desired_samples = -1
  desired_samples = _execute.make_int(desired_samples, "desired_samples")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DecodeWav", contents=contents, desired_channels=desired_channels,
                     desired_samples=desired_samples, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          decode_wav, (), dict(contents=contents,
                               desired_channels=desired_channels,
                               desired_samples=desired_samples, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("desired_channels", _op._get_attr_int("desired_channels"),
              "desired_samples", _op._get_attr_int("desired_samples"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DecodeWav", _inputs_flat, _attrs, _result)
  _result = _DecodeWavOutput._make(_result)
  return _result

# Raw-op export and the type-based dispatcher hook installed by the
# add_type_based_api_dispatcher decorator above.
DecodeWav = tf_export("raw_ops.DecodeWav")(_ops.to_raw_op(decode_wav))
_dispatcher_for_decode_wav = decode_wav._tf_type_based_dispatcher.Dispatch


def decode_wav_eager_fallback(contents: Annotated[Any, _atypes.String], desired_channels: int, desired_samples: int, name, ctx):
  # Eager fallback for DecodeWav; executes the op directly (2 outputs).
  if desired_channels is None:
    desired_channels = -1
  desired_channels = _execute.make_int(desired_channels, "desired_channels")
  if desired_samples is None:
    desired_samples = -1
  desired_samples = _execute.make_int(desired_samples, "desired_samples")
  contents = _ops.convert_to_tensor(contents, _dtypes.string)
  _inputs_flat = [contents]
  _attrs = ("desired_channels", desired_channels, "desired_samples",
  desired_samples)
  _result = _execute.execute(b"DecodeWav", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "DecodeWav", _inputs_flat, _attrs,
_result)
  _result = _DecodeWavOutput._make(_result)
  return _result


@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('audio.encode_wav')
def encode_wav(audio: Annotated[Any, _atypes.Float32], sample_rate: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.String]:
  r"""Encode audio data using the WAV file format.

  This operation will generate a string suitable to be saved out to create a .wav
  audio file. It will be encoded in the 16-bit PCM format. It takes in float
  values in the range -1.0f to 1.0f, and any outside that value will be clamped to
  that range.

  `audio` is a 2-D float Tensor of shape `[length, channels]`.
  `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).

  Args:
    audio: A `Tensor` of type `float32`. 2-D with shape `[length, channels]`.
    sample_rate: A `Tensor` of type `int32`.
      Scalar containing the sample frequency.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then dispatcher, then Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "EncodeWav", name, audio, sample_rate)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_encode_wav(
          (audio, sample_rate, name,), None)
      if _result is not NotImplemented:
        return _result
      return encode_wav_eager_fallback(
          audio, sample_rate, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            encode_wav, (), dict(audio=audio, sample_rate=sample_rate,
                                 name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_encode_wav(
        (audio, sample_rate, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EncodeWav", audio=audio, sample_rate=sample_rate, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          encode_wav, (), dict(audio=audio, sample_rate=sample_rate,
                               name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # EncodeWav has no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "EncodeWav", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export and type-based dispatcher hook.
EncodeWav = tf_export("raw_ops.EncodeWav")(_ops.to_raw_op(encode_wav))
_dispatcher_for_encode_wav = encode_wav._tf_type_based_dispatcher.Dispatch


def encode_wav_eager_fallback(audio: Annotated[Any, _atypes.Float32], sample_rate: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.String]:
  # Eager fallback for EncodeWav; executes the op directly (1 output).
  audio = _ops.convert_to_tensor(audio, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [audio, sample_rate]
  _attrs = None
  _result = _execute.execute(b"EncodeWav", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "EncodeWav", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def mfcc(spectrogram: Annotated[Any, _atypes.Float32], sample_rate: Annotated[Any, _atypes.Int32], upper_frequency_limit:float=4000, lower_frequency_limit:float=20, filterbank_channel_count:int=40, dct_coefficient_count:int=13, name=None) -> Annotated[Any,
_atypes.Float32]:
  r"""Transforms a spectrogram into a form that's useful for speech recognition.

  Mel Frequency Cepstral Coefficients are a way of representing audio data that's
  been effective as an input feature for machine learning. They are created by
  taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
  higher frequencies that are less significant to the human ear. They have a long
  history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
  is a good resource to learn more.

  Args:
    spectrogram: A `Tensor` of type `float32`.
      Typically produced by the Spectrogram op, with magnitude_squared
      set to true.
    sample_rate: A `Tensor` of type `int32`.
      How many samples per second the source audio used.
    upper_frequency_limit: An optional `float`. Defaults to `4000`.
      The highest frequency to use when calculating the
      cepstrum.
    lower_frequency_limit: An optional `float`. Defaults to `20`.
      The lowest frequency to use when calculating the
      cepstrum.
    filterbank_channel_count: An optional `int`. Defaults to `40`.
      Resolution of the Mel bank used internally.
    dct_coefficient_count: An optional `int`. Defaults to `13`.
      How many output channels to produce per time slice.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback (Mfcc has no
    # public dispatcher decorators).
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Mfcc", name, spectrogram, sample_rate, "upper_frequency_limit",
        upper_frequency_limit, "lower_frequency_limit", lower_frequency_limit,
        "filterbank_channel_count", filterbank_channel_count,
        "dct_coefficient_count", dct_coefficient_count)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return mfcc_eager_fallback(
          spectrogram, sample_rate,
          upper_frequency_limit=upper_frequency_limit,
          lower_frequency_limit=lower_frequency_limit,
          filterbank_channel_count=filterbank_channel_count,
          dct_coefficient_count=dct_coefficient_count, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if upper_frequency_limit is None:
    upper_frequency_limit = 4000
  upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
  if lower_frequency_limit is None:
    lower_frequency_limit = 20
  lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
  if filterbank_channel_count is None:
    filterbank_channel_count = 40
  filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
  if dct_coefficient_count is None:
    dct_coefficient_count = 13
  dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Mfcc", spectrogram=spectrogram, sample_rate=sample_rate,
                upper_frequency_limit=upper_frequency_limit,
                lower_frequency_limit=lower_frequency_limit,
                filterbank_channel_count=filterbank_channel_count,
                dct_coefficient_count=dct_coefficient_count, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("upper_frequency_limit", _op.get_attr("upper_frequency_limit"),
              "lower_frequency_limit", _op.get_attr("lower_frequency_limit"),
              "filterbank_channel_count",
              _op._get_attr_int("filterbank_channel_count"),
              "dct_coefficient_count",
              _op._get_attr_int("dct_coefficient_count"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Mfcc", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the same wrapper as tf.raw_ops.Mfcc.
Mfcc = tf_export("raw_ops.Mfcc")(_ops.to_raw_op(mfcc))


def mfcc_eager_fallback(spectrogram: Annotated[Any, _atypes.Float32], sample_rate: Annotated[Any, _atypes.Int32], upper_frequency_limit: float, lower_frequency_limit: float, filterbank_channel_count: int, dct_coefficient_count: int, name, ctx) -> Annotated[Any, _atypes.Float32]:
  # Eager fallback for Mfcc; normalizes attrs and executes the op directly.
  if upper_frequency_limit is None:
    upper_frequency_limit = 4000
  upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
  if lower_frequency_limit is None:
    lower_frequency_limit = 20
  lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
  if filterbank_channel_count is None:
    filterbank_channel_count = 40
  filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
  if dct_coefficient_count is None:
    dct_coefficient_count = 13
  dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
  spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
  sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
  _inputs_flat = [spectrogram, sample_rate]
  _attrs = ("upper_frequency_limit", upper_frequency_limit,
  "lower_frequency_limit", lower_frequency_limit, "filterbank_channel_count",
  filterbank_channel_count, "dct_coefficient_count", dct_coefficient_count)
  _result = _execute.execute(b"Mfcc", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Mfcc", _inputs_flat, _attrs,
_result)
  _result, = _result
  return _result

diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_experimental_dataset_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_experimental_dataset_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..4acca4269343915f2b8e97d0b7aebb43ffdf5246
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_experimental_dataset_ops.py
@@ -0,0 +1,10692 @@
"""Python wrappers around TensorFlow ops.

This file is MACHINE GENERATED! Do not edit.
"""

import collections

from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.security.fuzzing.py import annotation_types as _atypes

from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export

from typing import TypeVar, List, Any
from typing_extensions import Annotated

def assert_cardinality_dataset(input_dataset: Annotated[Any, _atypes.Variant], cardinality: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""TODO: add doc.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    cardinality: A `Tensor` of type `int64`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssertCardinalityDataset", name, input_dataset, cardinality,
        "output_types", output_types, "output_shapes", output_shapes)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return assert_cardinality_dataset_eager_fallback(
          input_dataset, cardinality, output_types=output_types,
          output_shapes=output_shapes, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_cardinality_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_cardinality_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssertCardinalityDataset", input_dataset=input_dataset,
                                    cardinality=cardinality,
                                    output_types=output_types,
                                    output_shapes=output_shapes, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AssertCardinalityDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the same wrapper as tf.raw_ops.AssertCardinalityDataset.
AssertCardinalityDataset = tf_export("raw_ops.AssertCardinalityDataset")(_ops.to_raw_op(assert_cardinality_dataset))


def assert_cardinality_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], cardinality: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]:
  # Eager fallback for AssertCardinalityDataset.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_cardinality_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_cardinality_dataset' Op, not %r."
% output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  cardinality = _ops.convert_to_tensor(cardinality, _dtypes.int64)
  _inputs_flat = [input_dataset, cardinality]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"AssertCardinalityDataset", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AssertCardinalityDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def assert_next_dataset(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""A transformation that asserts which transformations happen next.

  This transformation checks whether the camel-case names (i.e. "FlatMap", not
  "flat_map") of the transformations following this transformation match the list
  of names in the `transformations` argument. If there is a mismatch, the
  transformation raises an exception.

  The check occurs when iterating over the contents of the dataset, which
  means that the check happens *after* any static optimizations are applied
  to the dataset graph.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
      `AssertNextDataset` passes through the outputs of its input dataset.
    transformations: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying the transformations that are
      expected to happen next.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssertNextDataset", name, input_dataset, transformations,
        "output_types", output_types, "output_shapes", output_shapes)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return assert_next_dataset_eager_fallback(
          input_dataset, transformations, output_types=output_types,
          output_shapes=output_shapes, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_next_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_next_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssertNextDataset", input_dataset=input_dataset,
                             transformations=transformations,
                             output_types=output_types,
                             output_shapes=output_shapes, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AssertNextDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the same wrapper as tf.raw_ops.AssertNextDataset.
AssertNextDataset = tf_export("raw_ops.AssertNextDataset")(_ops.to_raw_op(assert_next_dataset))


def assert_next_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]:
  # Eager fallback for AssertNextDataset.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_next_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_next_dataset' Op, not %r."
% output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  transformations = _ops.convert_to_tensor(transformations, _dtypes.string)
  _inputs_flat = [input_dataset, transformations]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"AssertNextDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AssertNextDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def assert_prev_dataset(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""A transformation that asserts which transformations happened previously.

  This transformation checks the names and, optionally, the attribute name-value
  pairs in the `transformations` argument against those of the transformations
  that preceded this transformation. If there is a mismatch, the transformation
  raises an exception.

  The check occurs when iterating over the contents of the dataset, which
  means that the check happens *after* any static optimizations are applied
  to the dataset graph.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
      `AssertPrevDataset` passes through the outputs of its input dataset.
    transformations: A `Tensor` of type `string`.
      A `tf.string` vector `tf.Tensor` identifying the transformations, with optional
      attribute name-value pairs, that are expected to have happened previously.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssertPrevDataset", name, input_dataset, transformations,
        "output_types", output_types, "output_shapes", output_shapes)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return assert_prev_dataset_eager_fallback(
          input_dataset, transformations, output_types=output_types,
          output_shapes=output_shapes, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_prev_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_prev_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssertPrevDataset", input_dataset=input_dataset,
                             transformations=transformations,
                             output_types=output_types,
                             output_shapes=output_shapes, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AssertPrevDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the same wrapper as tf.raw_ops.AssertPrevDataset.
AssertPrevDataset = tf_export("raw_ops.AssertPrevDataset")(_ops.to_raw_op(assert_prev_dataset))


def assert_prev_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]:
  # Eager fallback for AssertPrevDataset.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'assert_prev_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'assert_prev_dataset' Op, not %r."
% output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  transformations = _ops.convert_to_tensor(transformations, _dtypes.string)
  _inputs_flat = [input_dataset, transformations]
  _attrs = ("output_types", output_types, "output_shapes", output_shapes)
  _result = _execute.execute(b"AssertPrevDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AssertPrevDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def auto_shard_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_workers: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, auto_shard_policy:int=0, num_replicas:int=0, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Creates a dataset that shards the input dataset.

  Creates a dataset that shards the input dataset by num_workers, returning a
  sharded dataset for the index-th worker. This attempts to automatically shard
  a dataset by examining the Dataset graph and inserting a shard op before the
  inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).

  This dataset will throw a NotFound error if we cannot shard the dataset
  automatically.

  Args:
    input_dataset: A `Tensor` of type `variant`.
      A variant tensor representing the input dataset.
    num_workers: A `Tensor` of type `int64`.
      A scalar representing the number of workers to distribute this dataset across.
    index: A `Tensor` of type `int64`.
      A scalar representing the index of the current worker out of num_workers.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    auto_shard_policy: An optional `int`. Defaults to `0`.
    num_replicas: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C fast path, then Python eager fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AutoShardDataset", name, input_dataset, num_workers, index,
        "auto_shard_policy", auto_shard_policy, "output_types", output_types,
        "output_shapes", output_shapes, "num_replicas", num_replicas)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return auto_shard_dataset_eager_fallback(
          input_dataset, num_workers, index,
          auto_shard_policy=auto_shard_policy, output_types=output_types,
          output_shapes=output_shapes, num_replicas=num_replicas, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'auto_shard_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'auto_shard_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if auto_shard_policy is None:
    auto_shard_policy = 0
  auto_shard_policy = _execute.make_int(auto_shard_policy, "auto_shard_policy")
  if num_replicas is None:
    num_replicas = 0
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AutoShardDataset", input_dataset=input_dataset,
                            num_workers=num_workers, index=index,
                            output_types=output_types,
                            output_shapes=output_shapes,
                            auto_shard_policy=auto_shard_policy,
                            num_replicas=num_replicas, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("auto_shard_policy", _op._get_attr_int("auto_shard_policy"),
              "output_types", _op.get_attr("output_types"), "output_shapes",
              _op.get_attr("output_shapes"), "num_replicas",
              _op._get_attr_int("num_replicas"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AutoShardDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the same wrapper as tf.raw_ops.AutoShardDataset.
AutoShardDataset = tf_export("raw_ops.AutoShardDataset")(_ops.to_raw_op(auto_shard_dataset))


def auto_shard_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_workers: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, auto_shard_policy: int, num_replicas: int, name, ctx) -> Annotated[Any, _atypes.Variant]:
  # Eager fallback for AutoShardDataset.
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'auto_shard_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'auto_shard_dataset' Op, not %r."
        % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  if auto_shard_policy is None:
    auto_shard_policy = 0
  auto_shard_policy = _execute.make_int(auto_shard_policy, "auto_shard_policy")
  if num_replicas is None:
    num_replicas = 0
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
  num_workers = _ops.convert_to_tensor(num_workers, _dtypes.int64)
  index = _ops.convert_to_tensor(index, _dtypes.int64)
  _inputs_flat = [input_dataset, num_workers, index]
  _attrs = ("auto_shard_policy", auto_shard_policy, "output_types",
  output_types, "output_shapes", output_shapes, "num_replicas", num_replicas)
  _result = _execute.execute(b"AutoShardDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AutoShardDataset", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def bytes_produced_stats_dataset(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]:
  r"""Records the bytes size of each element of `input_dataset` in a StatsAggregator.

  Args:
    input_dataset: A `Tensor` of type `variant`.
    tag: A `Tensor` of type `string`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BytesProducedStatsDataset", name, input_dataset, tag, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return bytes_produced_stats_dataset_eager_fallback( + input_dataset, tag, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'bytes_produced_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'bytes_produced_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BytesProducedStatsDataset", input_dataset=input_dataset, tag=tag, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BytesProducedStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BytesProducedStatsDataset = tf_export("raw_ops.BytesProducedStatsDataset")(_ops.to_raw_op(bytes_produced_stats_dataset)) + + +def bytes_produced_stats_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'bytes_produced_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'bytes_produced_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + _inputs_flat = [input_dataset, tag] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"BytesProducedStatsDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BytesProducedStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def csv_dataset(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, _atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + filenames: A `Tensor` of type `string`. + compression_type: A `Tensor` of type `string`. + buffer_size: A `Tensor` of type `int64`. + header: A `Tensor` of type `bool`. + field_delim: A `Tensor` of type `string`. + use_quote_delim: A `Tensor` of type `bool`. + na_value: A `Tensor` of type `string`. + select_cols: A `Tensor` of type `int64`. + record_defaults: A list of `Tensor` objects with types from: `float32`, `float64`, `int32`, `int64`, `string`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CSVDataset", name, filenames, compression_type, buffer_size, + header, field_delim, use_quote_delim, na_value, select_cols, + record_defaults, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return csv_dataset_eager_fallback( + filenames, compression_type, buffer_size, header, field_delim, + use_quote_delim, na_value, select_cols, record_defaults, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'csv_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CSVDataset", filenames=filenames, compression_type=compression_type, + buffer_size=buffer_size, header=header, + field_delim=field_delim, + use_quote_delim=use_quote_delim, na_value=na_value, + select_cols=select_cols, + record_defaults=record_defaults, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CSVDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CSVDataset = tf_export("raw_ops.CSVDataset")(_ops.to_raw_op(csv_dataset)) + + +def csv_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], 
header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, _atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'csv_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_output_types, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, ctx) + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + header = _ops.convert_to_tensor(header, _dtypes.bool) + field_delim = _ops.convert_to_tensor(field_delim, _dtypes.string) + use_quote_delim = _ops.convert_to_tensor(use_quote_delim, _dtypes.bool) + na_value = _ops.convert_to_tensor(na_value, _dtypes.string) + select_cols = _ops.convert_to_tensor(select_cols, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols] + list(record_defaults) + _attrs = ("output_types", _attr_output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"CSVDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CSVDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def csv_dataset_v2(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, 
_atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, exclude_cols: Annotated[Any, _atypes.Int64], output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + filenames: A `Tensor` of type `string`. + compression_type: A `Tensor` of type `string`. + buffer_size: A `Tensor` of type `int64`. + header: A `Tensor` of type `bool`. + field_delim: A `Tensor` of type `string`. + use_quote_delim: A `Tensor` of type `bool`. + na_value: A `Tensor` of type `string`. + select_cols: A `Tensor` of type `int64`. + record_defaults: A list of `Tensor` objects with types from: `float32`, `float64`, `int32`, `int64`, `string`. + exclude_cols: A `Tensor` of type `int64`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CSVDatasetV2", name, filenames, compression_type, buffer_size, + header, field_delim, use_quote_delim, na_value, select_cols, + record_defaults, exclude_cols, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return csv_dataset_v2_eager_fallback( + filenames, compression_type, buffer_size, header, field_delim, + use_quote_delim, na_value, select_cols, record_defaults, + exclude_cols, output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'csv_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CSVDatasetV2", filenames=filenames, + compression_type=compression_type, + buffer_size=buffer_size, header=header, + field_delim=field_delim, + use_quote_delim=use_quote_delim, na_value=na_value, + select_cols=select_cols, + record_defaults=record_defaults, + exclude_cols=exclude_cols, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CSVDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CSVDatasetV2 = tf_export("raw_ops.CSVDatasetV2")(_ops.to_raw_op(csv_dataset_v2)) + + +def csv_dataset_v2_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, _atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, exclude_cols: Annotated[Any, _atypes.Int64], output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'csv_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_output_types, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, ctx) + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + header = _ops.convert_to_tensor(header, _dtypes.bool) + field_delim = _ops.convert_to_tensor(field_delim, _dtypes.string) + use_quote_delim = _ops.convert_to_tensor(use_quote_delim, _dtypes.bool) + na_value = _ops.convert_to_tensor(na_value, _dtypes.string) + select_cols = _ops.convert_to_tensor(select_cols, _dtypes.int64) + exclude_cols = _ops.convert_to_tensor(exclude_cols, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols] + list(record_defaults) + [exclude_cols] + _attrs = ("output_types", _attr_output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"CSVDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CSVDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def choose_fastest_branch_dataset(input_dataset: Annotated[Any, _atypes.Variant], ratio_numerator: Annotated[Any, _atypes.Int64], ratio_denominator: Annotated[Any, _atypes.Int64], other_arguments, num_elements_per_branch: int, branches, other_arguments_lengths, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + ratio_numerator: A `Tensor` of type `int64`. + ratio_denominator: A `Tensor` of type `int64`. + other_arguments: A list of `Tensor` objects. + num_elements_per_branch: An `int` that is `>= 1`. 
+ branches: A list of functions decorated with @Defun that has length `>= 1`. + other_arguments_lengths: A list of `ints` that has length `>= 1`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ChooseFastestBranchDataset", name, input_dataset, + ratio_numerator, ratio_denominator, other_arguments, + "num_elements_per_branch", num_elements_per_branch, "branches", + branches, "other_arguments_lengths", other_arguments_lengths, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return choose_fastest_branch_dataset_eager_fallback( + input_dataset, ratio_numerator, ratio_denominator, other_arguments, + num_elements_per_branch=num_elements_per_branch, branches=branches, + other_arguments_lengths=other_arguments_lengths, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_elements_per_branch = _execute.make_int(num_elements_per_branch, "num_elements_per_branch") + if not isinstance(branches, (list, tuple)): + raise TypeError( + "Expected list for 'branches' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % branches) + if not isinstance(other_arguments_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'other_arguments_lengths' argument to " + "'choose_fastest_branch_dataset' Op, not %r." 
% other_arguments_lengths) + other_arguments_lengths = [_execute.make_int(_i, "other_arguments_lengths") for _i in other_arguments_lengths] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ChooseFastestBranchDataset", input_dataset=input_dataset, + ratio_numerator=ratio_numerator, + ratio_denominator=ratio_denominator, + other_arguments=other_arguments, + num_elements_per_branch=num_elements_per_branch, + branches=branches, + other_arguments_lengths=other_arguments_lengths, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Targuments", _op.get_attr("Targuments"), + "num_elements_per_branch", + _op._get_attr_int("num_elements_per_branch"), "branches", + _op.get_attr("branches"), "other_arguments_lengths", + _op.get_attr("other_arguments_lengths"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ChooseFastestBranchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ChooseFastestBranchDataset = tf_export("raw_ops.ChooseFastestBranchDataset")(_ops.to_raw_op(choose_fastest_branch_dataset)) + + +def choose_fastest_branch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], ratio_numerator: Annotated[Any, _atypes.Int64], ratio_denominator: Annotated[Any, _atypes.Int64], other_arguments, 
num_elements_per_branch: int, branches, other_arguments_lengths, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + num_elements_per_branch = _execute.make_int(num_elements_per_branch, "num_elements_per_branch") + if not isinstance(branches, (list, tuple)): + raise TypeError( + "Expected list for 'branches' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % branches) + if not isinstance(other_arguments_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'other_arguments_lengths' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % other_arguments_lengths) + other_arguments_lengths = [_execute.make_int(_i, "other_arguments_lengths") for _i in other_arguments_lengths] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'choose_fastest_branch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'choose_fastest_branch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + ratio_numerator = _ops.convert_to_tensor(ratio_numerator, _dtypes.int64) + ratio_denominator = _ops.convert_to_tensor(ratio_denominator, _dtypes.int64) + _inputs_flat = [input_dataset, ratio_numerator, ratio_denominator] + list(other_arguments) + _attrs = ("Targuments", _attr_Targuments, "num_elements_per_branch", + num_elements_per_branch, "branches", branches, "other_arguments_lengths", + other_arguments_lengths, "output_types", output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"ChooseFastestBranchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ChooseFastestBranchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def choose_fastest_dataset(input_datasets: Annotated[List[Any], _atypes.Variant], num_experiments: int, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_datasets: A list of at least 2 `Tensor` objects with type `variant`. + num_experiments: An `int`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ChooseFastestDataset", name, input_datasets, "num_experiments", + num_experiments, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return choose_fastest_dataset_eager_fallback( + input_datasets, num_experiments=num_experiments, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'choose_fastest_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + num_experiments = _execute.make_int(num_experiments, "num_experiments") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'choose_fastest_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'choose_fastest_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ChooseFastestDataset", input_datasets=input_datasets, + num_experiments=num_experiments, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "num_experiments", + _op._get_attr_int("num_experiments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ChooseFastestDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ChooseFastestDataset = tf_export("raw_ops.ChooseFastestDataset")(_ops.to_raw_op(choose_fastest_dataset)) + + +def choose_fastest_dataset_eager_fallback(input_datasets: Annotated[List[Any], _atypes.Variant], num_experiments: int, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'choose_fastest_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + num_experiments = _execute.make_int(num_experiments, "num_experiments") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'choose_fastest_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'choose_fastest_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_datasets = _ops.convert_n_to_tensor(input_datasets, _dtypes.variant) + _inputs_flat = list(input_datasets) + _attrs = ("N", _attr_N, "num_experiments", num_experiments, "output_types", + output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ChooseFastestDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ChooseFastestDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def compress_element(components, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Compresses a dataset element. + + Args: + components: A list of `Tensor` objects. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CompressElement", name, components) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return compress_element_eager_fallback( + components, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CompressElement", components=components, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("input_types", _op.get_attr("input_types")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CompressElement", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CompressElement = tf_export("raw_ops.CompressElement")(_ops.to_raw_op(compress_element)) + + +def compress_element_eager_fallback(components, name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_input_types, components = _execute.convert_to_mixed_eager_tensors(components, ctx) + _inputs_flat = list(components) + _attrs = ("input_types", _attr_input_types) + _result = _execute.execute(b"CompressElement", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CompressElement", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def compute_batch_size(input_dataset: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Int64]: + r"""Computes the static batch size of a dataset sans partial batches. + + Args: + input_dataset: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ComputeBatchSize", name, input_dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return compute_batch_size_eager_fallback( + input_dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ComputeBatchSize", input_dataset=input_dataset, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "ComputeBatchSize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ComputeBatchSize = tf_export("raw_ops.ComputeBatchSize")(_ops.to_raw_op(compute_batch_size)) + + +def compute_batch_size_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Int64]: + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = None + _result = _execute.execute(b"ComputeBatchSize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ComputeBatchSize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def data_service_dataset(dataset_id: Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, task_refresh_interval_hint_ms:int=-1, data_transfer_protocol:str="", target_workers:str="AUTO", cross_trainer_cache_options:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that reads data from the tf.data service. + + Args: + dataset_id: A `Tensor` of type `int64`. + processing_mode: A `Tensor` of type `string`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + job_name: A `Tensor` of type `string`. + max_outstanding_requests: A `Tensor` of type `int64`. + iteration_counter: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. 
+ output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + task_refresh_interval_hint_ms: An optional `int`. Defaults to `-1`. + data_transfer_protocol: An optional `string`. Defaults to `""`. + target_workers: An optional `string`. Defaults to `"AUTO"`. + cross_trainer_cache_options: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DataServiceDataset", name, dataset_id, processing_mode, + address, protocol, job_name, max_outstanding_requests, + iteration_counter, "task_refresh_interval_hint_ms", + task_refresh_interval_hint_ms, "output_types", output_types, + "output_shapes", output_shapes, "data_transfer_protocol", + data_transfer_protocol, "target_workers", target_workers, + "cross_trainer_cache_options", cross_trainer_cache_options) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return data_service_dataset_eager_fallback( + dataset_id, processing_mode, address, protocol, job_name, + max_outstanding_requests, iteration_counter, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + output_types=output_types, output_shapes=output_shapes, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + cross_trainer_cache_options=cross_trainer_cache_options, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DataServiceDataset", dataset_id=dataset_id, + processing_mode=processing_mode, + address=address, protocol=protocol, + job_name=job_name, + max_outstanding_requests=max_outstanding_requests, + iteration_counter=iteration_counter, + output_types=output_types, + output_shapes=output_shapes, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + cross_trainer_cache_options=cross_trainer_cache_options, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("task_refresh_interval_hint_ms", + _op._get_attr_int("task_refresh_interval_hint_ms"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "data_transfer_protocol", + _op.get_attr("data_transfer_protocol"), "target_workers", + _op.get_attr("target_workers"), 
"cross_trainer_cache_options", + _op.get_attr("cross_trainer_cache_options")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DataServiceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DataServiceDataset = tf_export("raw_ops.DataServiceDataset")(_ops.to_raw_op(data_service_dataset)) + + +def data_service_dataset_eager_fallback(dataset_id: Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, task_refresh_interval_hint_ms: int, data_transfer_protocol: str, target_workers: str, cross_trainer_cache_options: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + dataset_id = _ops.convert_to_tensor(dataset_id, _dtypes.int64) + processing_mode = _ops.convert_to_tensor(processing_mode, _dtypes.string) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + job_name = _ops.convert_to_tensor(job_name, _dtypes.string) + max_outstanding_requests = _ops.convert_to_tensor(max_outstanding_requests, _dtypes.int64) + iteration_counter = _ops.convert_to_tensor(iteration_counter, _dtypes.resource) + _inputs_flat = [dataset_id, processing_mode, address, protocol, job_name, max_outstanding_requests, iteration_counter] + _attrs = ("task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "cross_trainer_cache_options", cross_trainer_cache_options) + _result = _execute.execute(b"DataServiceDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DataServiceDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def data_service_dataset_v2(dataset_id: 
Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, task_refresh_interval_hint_ms:int=-1, data_transfer_protocol:str="", target_workers:str="AUTO", cross_trainer_cache_options:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that reads data from the tf.data service. + + Args: + dataset_id: A `Tensor` of type `int64`. + processing_mode: A `Tensor` of type `string`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + job_name: A `Tensor` of type `string`. + consumer_index: A `Tensor` of type `int64`. + num_consumers: A `Tensor` of type `int64`. + max_outstanding_requests: A `Tensor` of type `int64`. + iteration_counter: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + task_refresh_interval_hint_ms: An optional `int`. Defaults to `-1`. + data_transfer_protocol: An optional `string`. Defaults to `""`. + target_workers: An optional `string`. Defaults to `"AUTO"`. + cross_trainer_cache_options: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DataServiceDatasetV2", name, dataset_id, processing_mode, + address, protocol, job_name, consumer_index, num_consumers, + max_outstanding_requests, iteration_counter, + "task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "cross_trainer_cache_options", + cross_trainer_cache_options) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return data_service_dataset_v2_eager_fallback( + dataset_id, processing_mode, address, protocol, job_name, + consumer_index, num_consumers, max_outstanding_requests, + iteration_counter, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + output_types=output_types, output_shapes=output_shapes, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + cross_trainer_cache_options=cross_trainer_cache_options, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DataServiceDatasetV2", dataset_id=dataset_id, + processing_mode=processing_mode, + address=address, protocol=protocol, + job_name=job_name, + consumer_index=consumer_index, + num_consumers=num_consumers, + max_outstanding_requests=max_outstanding_requests, + iteration_counter=iteration_counter, + output_types=output_types, + output_shapes=output_shapes, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + cross_trainer_cache_options=cross_trainer_cache_options, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("task_refresh_interval_hint_ms", + _op._get_attr_int("task_refresh_interval_hint_ms"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "data_transfer_protocol", + _op.get_attr("data_transfer_protocol"), "target_workers", + _op.get_attr("target_workers"), "cross_trainer_cache_options", + _op.get_attr("cross_trainer_cache_options")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DataServiceDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result 
+ return _result + +DataServiceDatasetV2 = tf_export("raw_ops.DataServiceDatasetV2")(_ops.to_raw_op(data_service_dataset_v2)) + + +def data_service_dataset_v2_eager_fallback(dataset_id: Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, task_refresh_interval_hint_ms: int, data_transfer_protocol: str, target_workers: str, cross_trainer_cache_options: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + dataset_id = _ops.convert_to_tensor(dataset_id, _dtypes.int64) + processing_mode = _ops.convert_to_tensor(processing_mode, _dtypes.string) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + job_name = _ops.convert_to_tensor(job_name, _dtypes.string) + consumer_index = _ops.convert_to_tensor(consumer_index, _dtypes.int64) + num_consumers = _ops.convert_to_tensor(num_consumers, _dtypes.int64) + max_outstanding_requests = _ops.convert_to_tensor(max_outstanding_requests, _dtypes.int64) + iteration_counter = _ops.convert_to_tensor(iteration_counter, _dtypes.resource) + _inputs_flat = [dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter] + _attrs = ("task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "cross_trainer_cache_options", cross_trainer_cache_options) + _result = _execute.execute(b"DataServiceDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "DataServiceDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def data_service_dataset_v3(dataset_id: Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, uncompress_fn, task_refresh_interval_hint_ms:int=-1, data_transfer_protocol:str="", target_workers:str="AUTO", uncompress:bool=False, cross_trainer_cache_options:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that reads data from the tf.data service. + + Args: + dataset_id: A `Tensor` of type `int64`. + processing_mode: A `Tensor` of type `string`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + job_name: A `Tensor` of type `string`. + consumer_index: A `Tensor` of type `int64`. + num_consumers: A `Tensor` of type `int64`. + max_outstanding_requests: A `Tensor` of type `int64`. + iteration_counter: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + uncompress_fn: A function decorated with @Defun. + task_refresh_interval_hint_ms: An optional `int`. Defaults to `-1`. + data_transfer_protocol: An optional `string`. Defaults to `""`. + target_workers: An optional `string`. Defaults to `"AUTO"`. + uncompress: An optional `bool`. Defaults to `False`. + cross_trainer_cache_options: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DataServiceDatasetV3", name, dataset_id, processing_mode, + address, protocol, job_name, consumer_index, num_consumers, + max_outstanding_requests, iteration_counter, + "task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "uncompress", uncompress, "uncompress_fn", + uncompress_fn, "cross_trainer_cache_options", + cross_trainer_cache_options) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return data_service_dataset_v3_eager_fallback( + dataset_id, processing_mode, address, protocol, job_name, + consumer_index, num_consumers, max_outstanding_requests, + iteration_counter, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + output_types=output_types, output_shapes=output_shapes, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, uncompress=uncompress, + uncompress_fn=uncompress_fn, + cross_trainer_cache_options=cross_trainer_cache_options, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v3' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if uncompress is None: + uncompress = False + uncompress = _execute.make_bool(uncompress, "uncompress") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DataServiceDatasetV3", dataset_id=dataset_id, + processing_mode=processing_mode, + address=address, protocol=protocol, + job_name=job_name, + consumer_index=consumer_index, + num_consumers=num_consumers, + max_outstanding_requests=max_outstanding_requests, + iteration_counter=iteration_counter, + output_types=output_types, + output_shapes=output_shapes, + uncompress_fn=uncompress_fn, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + uncompress=uncompress, + cross_trainer_cache_options=cross_trainer_cache_options, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("task_refresh_interval_hint_ms", + _op._get_attr_int("task_refresh_interval_hint_ms"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "data_transfer_protocol", + _op.get_attr("data_transfer_protocol"), "target_workers", + _op.get_attr("target_workers"), "uncompress", + _op._get_attr_bool("uncompress"), 
"uncompress_fn", + _op.get_attr("uncompress_fn"), "cross_trainer_cache_options", + _op.get_attr("cross_trainer_cache_options")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DataServiceDatasetV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DataServiceDatasetV3 = tf_export("raw_ops.DataServiceDatasetV3")(_ops.to_raw_op(data_service_dataset_v3)) + + +def data_service_dataset_v3_eager_fallback(dataset_id: Annotated[Any, _atypes.Int64], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, uncompress_fn, task_refresh_interval_hint_ms: int, data_transfer_protocol: str, target_workers: str, uncompress: bool, cross_trainer_cache_options: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v3' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v3' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if uncompress is None: + uncompress = False + uncompress = _execute.make_bool(uncompress, "uncompress") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + dataset_id = _ops.convert_to_tensor(dataset_id, _dtypes.int64) + processing_mode = _ops.convert_to_tensor(processing_mode, _dtypes.string) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + job_name = _ops.convert_to_tensor(job_name, _dtypes.string) + consumer_index = _ops.convert_to_tensor(consumer_index, _dtypes.int64) + num_consumers = _ops.convert_to_tensor(num_consumers, _dtypes.int64) + max_outstanding_requests = _ops.convert_to_tensor(max_outstanding_requests, _dtypes.int64) + iteration_counter = _ops.convert_to_tensor(iteration_counter, _dtypes.resource) + _inputs_flat = [dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter] + _attrs = ("task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "uncompress", uncompress, "uncompress_fn", uncompress_fn, + "cross_trainer_cache_options", 
cross_trainer_cache_options) + _result = _execute.execute(b"DataServiceDatasetV3", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DataServiceDatasetV3", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def data_service_dataset_v4(dataset_id: Annotated[Any, _atypes.String], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, uncompress_fn, task_refresh_interval_hint_ms:int=-1, data_transfer_protocol:str="", target_workers:str="AUTO", uncompress:bool=False, cross_trainer_cache_options:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that reads data from the tf.data service. + + Args: + dataset_id: A `Tensor` of type `string`. + processing_mode: A `Tensor` of type `string`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + job_name: A `Tensor` of type `string`. + consumer_index: A `Tensor` of type `int64`. + num_consumers: A `Tensor` of type `int64`. + max_outstanding_requests: A `Tensor` of type `int64`. + iteration_counter: A `Tensor` of type `resource`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + uncompress_fn: A function decorated with @Defun. + task_refresh_interval_hint_ms: An optional `int`. Defaults to `-1`. + data_transfer_protocol: An optional `string`. Defaults to `""`. + target_workers: An optional `string`. Defaults to `"AUTO"`. + uncompress: An optional `bool`. Defaults to `False`. 
+ cross_trainer_cache_options: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DataServiceDatasetV4", name, dataset_id, processing_mode, + address, protocol, job_name, consumer_index, num_consumers, + max_outstanding_requests, iteration_counter, + "task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "uncompress", uncompress, "uncompress_fn", + uncompress_fn, "cross_trainer_cache_options", + cross_trainer_cache_options) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return data_service_dataset_v4_eager_fallback( + dataset_id, processing_mode, address, protocol, job_name, + consumer_index, num_consumers, max_outstanding_requests, + iteration_counter, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + output_types=output_types, output_shapes=output_shapes, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, uncompress=uncompress, + uncompress_fn=uncompress_fn, + cross_trainer_cache_options=cross_trainer_cache_options, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v4' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v4' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if uncompress is None: + uncompress = False + uncompress = _execute.make_bool(uncompress, "uncompress") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DataServiceDatasetV4", dataset_id=dataset_id, + processing_mode=processing_mode, + address=address, protocol=protocol, + job_name=job_name, + consumer_index=consumer_index, + num_consumers=num_consumers, + max_outstanding_requests=max_outstanding_requests, + iteration_counter=iteration_counter, + output_types=output_types, + output_shapes=output_shapes, + uncompress_fn=uncompress_fn, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + data_transfer_protocol=data_transfer_protocol, + target_workers=target_workers, + uncompress=uncompress, + cross_trainer_cache_options=cross_trainer_cache_options, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("task_refresh_interval_hint_ms", + _op._get_attr_int("task_refresh_interval_hint_ms"), + 
"output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "data_transfer_protocol", + _op.get_attr("data_transfer_protocol"), "target_workers", + _op.get_attr("target_workers"), "uncompress", + _op._get_attr_bool("uncompress"), "uncompress_fn", + _op.get_attr("uncompress_fn"), "cross_trainer_cache_options", + _op.get_attr("cross_trainer_cache_options")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DataServiceDatasetV4", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DataServiceDatasetV4 = tf_export("raw_ops.DataServiceDatasetV4")(_ops.to_raw_op(data_service_dataset_v4)) + + +def data_service_dataset_v4_eager_fallback(dataset_id: Annotated[Any, _atypes.String], processing_mode: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], job_name: Annotated[Any, _atypes.String], consumer_index: Annotated[Any, _atypes.Int64], num_consumers: Annotated[Any, _atypes.Int64], max_outstanding_requests: Annotated[Any, _atypes.Int64], iteration_counter: Annotated[Any, _atypes.Resource], output_types, output_shapes, uncompress_fn, task_refresh_interval_hint_ms: int, data_transfer_protocol: str, target_workers: str, uncompress: bool, cross_trainer_cache_options: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'data_service_dataset_v4' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'data_service_dataset_v4' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if task_refresh_interval_hint_ms is None: + task_refresh_interval_hint_ms = -1 + task_refresh_interval_hint_ms = _execute.make_int(task_refresh_interval_hint_ms, "task_refresh_interval_hint_ms") + if data_transfer_protocol is None: + data_transfer_protocol = "" + data_transfer_protocol = _execute.make_str(data_transfer_protocol, "data_transfer_protocol") + if target_workers is None: + target_workers = "AUTO" + target_workers = _execute.make_str(target_workers, "target_workers") + if uncompress is None: + uncompress = False + uncompress = _execute.make_bool(uncompress, "uncompress") + if cross_trainer_cache_options is None: + cross_trainer_cache_options = "" + cross_trainer_cache_options = _execute.make_str(cross_trainer_cache_options, "cross_trainer_cache_options") + dataset_id = _ops.convert_to_tensor(dataset_id, _dtypes.string) + processing_mode = _ops.convert_to_tensor(processing_mode, _dtypes.string) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + job_name = _ops.convert_to_tensor(job_name, _dtypes.string) + consumer_index = _ops.convert_to_tensor(consumer_index, _dtypes.int64) + num_consumers = _ops.convert_to_tensor(num_consumers, _dtypes.int64) + max_outstanding_requests = _ops.convert_to_tensor(max_outstanding_requests, _dtypes.int64) + iteration_counter = _ops.convert_to_tensor(iteration_counter, _dtypes.resource) + _inputs_flat = [dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter] + _attrs = ("task_refresh_interval_hint_ms", task_refresh_interval_hint_ms, + "output_types", output_types, "output_shapes", output_shapes, + "data_transfer_protocol", data_transfer_protocol, "target_workers", + target_workers, "uncompress", uncompress, "uncompress_fn", uncompress_fn, + "cross_trainer_cache_options", 
cross_trainer_cache_options) + _result = _execute.execute(b"DataServiceDatasetV4", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DataServiceDatasetV4", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_from_graph(graph_def: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset from the given `graph_def`. + + Creates a dataset from the provided `graph_def`. + + Args: + graph_def: A `Tensor` of type `string`. + The graph representation of the dataset (as serialized GraphDef). + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetFromGraph", name, graph_def) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_from_graph_eager_fallback( + graph_def, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetFromGraph", graph_def=graph_def, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DatasetFromGraph", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DatasetFromGraph = tf_export("raw_ops.DatasetFromGraph")(_ops.to_raw_op(dataset_from_graph)) + + +def dataset_from_graph_eager_fallback(graph_def: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Variant]: + graph_def = _ops.convert_to_tensor(graph_def, _dtypes.string) + _inputs_flat = [graph_def] + _attrs = None + _result = _execute.execute(b"DatasetFromGraph", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DatasetFromGraph", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def dataset_to_tf_record(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], name=None): + r"""Writes the given dataset to the given file using the TFRecord format. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to write. + filename: A `Tensor` of type `string`. + A scalar string tensor representing the filename to use. + compression_type: A `Tensor` of type `string`. + A scalar string tensor containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DatasetToTFRecord", name, input_dataset, filename, + compression_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dataset_to_tf_record_eager_fallback( + input_dataset, filename, compression_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DatasetToTFRecord", input_dataset=input_dataset, filename=filename, + compression_type=compression_type, name=name) + return _op +DatasetToTFRecord = tf_export("raw_ops.DatasetToTFRecord")(_ops.to_raw_op(dataset_to_tf_record)) + + +def dataset_to_tf_record_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], name, ctx): + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + filename = _ops.convert_to_tensor(filename, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + _inputs_flat = [input_dataset, filename, compression_type] + _attrs = None + _result = _execute.execute(b"DatasetToTFRecord", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def dense_to_sparse_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], row_shape: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches input elements into a SparseTensor. + + Args: + input_dataset: A `Tensor` of type `variant`. + A handle to an input dataset. Must have a single component. 
+ batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. + row_shape: A `Tensor` of type `int64`. + A vector representing the dense shape of each row in the produced + SparseTensor. The shape may be partially specified, using `-1` to indicate + that a particular dimension should use the maximum size of all batch elements. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DenseToSparseBatchDataset", name, input_dataset, batch_size, + row_shape, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dense_to_sparse_batch_dataset_eager_fallback( + input_dataset, batch_size, row_shape, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'dense_to_sparse_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'dense_to_sparse_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DenseToSparseBatchDataset", input_dataset=input_dataset, + batch_size=batch_size, + row_shape=row_shape, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DenseToSparseBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DenseToSparseBatchDataset = tf_export("raw_ops.DenseToSparseBatchDataset")(_ops.to_raw_op(dense_to_sparse_batch_dataset)) + + +def dense_to_sparse_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], row_shape: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'dense_to_sparse_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'dense_to_sparse_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + row_shape = _ops.convert_to_tensor(row_shape, _dtypes.int64) + _inputs_flat = [input_dataset, batch_size, row_shape] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"DenseToSparseBatchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DenseToSparseBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def directed_interleave_dataset(selector_input_dataset: Annotated[Any, _atypes.Variant], data_input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, stop_on_empty_dataset:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""A substitute for `InterleaveDataset` on a fixed list of `N` datasets. + + Args: + selector_input_dataset: A `Tensor` of type `variant`. + A dataset of scalar `DT_INT64` elements that determines which of the + `N` data inputs should produce the next output element. + data_input_datasets: A list of at least 1 `Tensor` objects with type `variant`. + `N` datasets with the same type that will be interleaved according to + the values of `selector_input_dataset`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + stop_on_empty_dataset: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DirectedInterleaveDataset", name, selector_input_dataset, + data_input_datasets, "output_types", output_types, "output_shapes", + output_shapes, "stop_on_empty_dataset", stop_on_empty_dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return directed_interleave_dataset_eager_fallback( + selector_input_dataset, data_input_datasets, + output_types=output_types, output_shapes=output_shapes, + stop_on_empty_dataset=stop_on_empty_dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(data_input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'data_input_datasets' argument to " + "'directed_interleave_dataset' Op, not %r." % data_input_datasets) + _attr_N = len(data_input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'directed_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'directed_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if stop_on_empty_dataset is None: + stop_on_empty_dataset = False + stop_on_empty_dataset = _execute.make_bool(stop_on_empty_dataset, "stop_on_empty_dataset") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DirectedInterleaveDataset", selector_input_dataset=selector_input_dataset, + data_input_datasets=data_input_datasets, + output_types=output_types, + output_shapes=output_shapes, + stop_on_empty_dataset=stop_on_empty_dataset, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N"), + "stop_on_empty_dataset", + _op._get_attr_bool("stop_on_empty_dataset")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DirectedInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DirectedInterleaveDataset = tf_export("raw_ops.DirectedInterleaveDataset")(_ops.to_raw_op(directed_interleave_dataset)) + + +def directed_interleave_dataset_eager_fallback(selector_input_dataset: Annotated[Any, _atypes.Variant], data_input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, stop_on_empty_dataset: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(data_input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'data_input_datasets' argument to " + "'directed_interleave_dataset' Op, not %r." % data_input_datasets) + _attr_N = len(data_input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'directed_interleave_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'directed_interleave_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if stop_on_empty_dataset is None: + stop_on_empty_dataset = False + stop_on_empty_dataset = _execute.make_bool(stop_on_empty_dataset, "stop_on_empty_dataset") + selector_input_dataset = _ops.convert_to_tensor(selector_input_dataset, _dtypes.variant) + data_input_datasets = _ops.convert_n_to_tensor(data_input_datasets, _dtypes.variant) + _inputs_flat = [selector_input_dataset] + list(data_input_datasets) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, "N", + _attr_N, "stop_on_empty_dataset", stop_on_empty_dataset) + _result = _execute.execute(b"DirectedInterleaveDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DirectedInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def distributed_save(dataset: Annotated[Any, _atypes.Variant], directory: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], metadata:str="", name=None): + r"""TODO: add doc. + + Args: + dataset: A `Tensor` of type `variant`. + directory: A `Tensor` of type `string`. + address: A `Tensor` of type `string`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DistributedSave", name, dataset, directory, address, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return distributed_save_eager_fallback( + dataset, directory, address, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DistributedSave", dataset=dataset, directory=directory, + address=address, metadata=metadata, name=name) + return _op +DistributedSave = tf_export("raw_ops.DistributedSave")(_ops.to_raw_op(distributed_save)) + + +def distributed_save_eager_fallback(dataset: Annotated[Any, _atypes.Variant], directory: Annotated[Any, _atypes.String], address: Annotated[Any, _atypes.String], metadata: str, name, ctx): + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + directory = _ops.convert_to_tensor(directory, _dtypes.string) + address = _ops.convert_to_tensor(address, _dtypes.string) + _inputs_flat = [dataset, directory, address] + _attrs = ("metadata", metadata) + _result = _execute.execute(b"DistributedSave", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def dummy_iteration_counter(name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DummyIterationCounter", name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dummy_iteration_counter_eager_fallback( + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DummyIterationCounter", name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DummyIterationCounter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DummyIterationCounter = tf_export("raw_ops.DummyIterationCounter")(_ops.to_raw_op(dummy_iteration_counter)) + + +def dummy_iteration_counter_eager_fallback(name, ctx) -> Annotated[Any, _atypes.Resource]: + _inputs_flat = [] + _attrs = None + _result = _execute.execute(b"DummyIterationCounter", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DummyIterationCounter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_assert_next_dataset(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + transformations: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalAssertNextDataset", name, input_dataset, + transformations, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_assert_next_dataset_eager_fallback( + input_dataset, transformations, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_assert_next_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_assert_next_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalAssertNextDataset", input_dataset=input_dataset, + transformations=transformations, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalAssertNextDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalAssertNextDataset = tf_export("raw_ops.ExperimentalAssertNextDataset")(_ops.to_raw_op(experimental_assert_next_dataset)) + + +def experimental_assert_next_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], transformations: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_assert_next_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_assert_next_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + transformations = _ops.convert_to_tensor(transformations, _dtypes.string) + _inputs_flat = [input_dataset, transformations] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalAssertNextDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalAssertNextDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_auto_shard_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_workers: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, auto_shard_policy:int=0, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that shards the input dataset. + + Creates a dataset that shards the input dataset by num_workers, returning a + sharded dataset for the index-th worker. This attempts to automatically shard + a dataset by examining the Dataset graph and inserting a shard op before the + inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + + This dataset will throw a NotFound error if we cannot shard the dataset + automatically. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + num_workers: A `Tensor` of type `int64`. + A scalar representing the number of workers to distribute this dataset across. + index: A `Tensor` of type `int64`. + A scalar representing the index of the current worker out of num_workers. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + auto_shard_policy: An optional `int`. Defaults to `0`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalAutoShardDataset", name, input_dataset, + num_workers, index, "auto_shard_policy", auto_shard_policy, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_auto_shard_dataset_eager_fallback( + input_dataset, num_workers, index, + auto_shard_policy=auto_shard_policy, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_auto_shard_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_auto_shard_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if auto_shard_policy is None: + auto_shard_policy = 0 + auto_shard_policy = _execute.make_int(auto_shard_policy, "auto_shard_policy") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalAutoShardDataset", input_dataset=input_dataset, + num_workers=num_workers, index=index, + output_types=output_types, + output_shapes=output_shapes, + auto_shard_policy=auto_shard_policy, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("auto_shard_policy", _op._get_attr_int("auto_shard_policy"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalAutoShardDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalAutoShardDataset = tf_export("raw_ops.ExperimentalAutoShardDataset")(_ops.to_raw_op(experimental_auto_shard_dataset)) + + +def experimental_auto_shard_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_workers: Annotated[Any, _atypes.Int64], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, auto_shard_policy: int, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_auto_shard_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_auto_shard_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if auto_shard_policy is None: + auto_shard_policy = 0 + auto_shard_policy = _execute.make_int(auto_shard_policy, "auto_shard_policy") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_workers = _ops.convert_to_tensor(num_workers, _dtypes.int64) + index = _ops.convert_to_tensor(index, _dtypes.int64) + _inputs_flat = [input_dataset, num_workers, index] + _attrs = ("auto_shard_policy", auto_shard_policy, "output_types", + output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalAutoShardDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalAutoShardDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_bytes_produced_stats_dataset(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Records the bytes size of each element of `input_dataset` in a StatsAggregator. + + Args: + input_dataset: A `Tensor` of type `variant`. + tag: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalBytesProducedStatsDataset", name, input_dataset, + tag, "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_bytes_produced_stats_dataset_eager_fallback( + input_dataset, tag, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_bytes_produced_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_bytes_produced_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalBytesProducedStatsDataset", input_dataset=input_dataset, + tag=tag, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalBytesProducedStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalBytesProducedStatsDataset = tf_export("raw_ops.ExperimentalBytesProducedStatsDataset")(_ops.to_raw_op(experimental_bytes_produced_stats_dataset)) + + +def experimental_bytes_produced_stats_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_bytes_produced_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_bytes_produced_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + _inputs_flat = [input_dataset, tag] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalBytesProducedStatsDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalBytesProducedStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_csv_dataset(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, _atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + filenames: A `Tensor` of type `string`. + compression_type: A `Tensor` of type `string`. + buffer_size: A `Tensor` of type `int64`. + header: A `Tensor` of type `bool`. + field_delim: A `Tensor` of type `string`. + use_quote_delim: A `Tensor` of type `bool`. + na_value: A `Tensor` of type `string`. + select_cols: A `Tensor` of type `int64`. + record_defaults: A list of `Tensor` objects with types from: `float32`, `float64`, `int32`, `int64`, `string`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalCSVDataset", name, filenames, compression_type, + buffer_size, header, field_delim, use_quote_delim, na_value, + select_cols, record_defaults, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_csv_dataset_eager_fallback( + filenames, compression_type, buffer_size, header, field_delim, + use_quote_delim, na_value, select_cols, record_defaults, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_csv_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalCSVDataset", filenames=filenames, + compression_type=compression_type, + buffer_size=buffer_size, header=header, + field_delim=field_delim, + use_quote_delim=use_quote_delim, + na_value=na_value, select_cols=select_cols, + record_defaults=record_defaults, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalCSVDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalCSVDataset = tf_export("raw_ops.ExperimentalCSVDataset")(_ops.to_raw_op(experimental_csv_dataset)) + + +def experimental_csv_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], buffer_size: Annotated[Any, _atypes.Int64], header: Annotated[Any, _atypes.Bool], field_delim: Annotated[Any, _atypes.String], use_quote_delim: Annotated[Any, _atypes.Bool], na_value: Annotated[Any, _atypes.String], select_cols: Annotated[Any, _atypes.Int64], record_defaults, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_csv_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_output_types, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, ctx) + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64) + header = _ops.convert_to_tensor(header, _dtypes.bool) + field_delim = _ops.convert_to_tensor(field_delim, _dtypes.string) + use_quote_delim = _ops.convert_to_tensor(use_quote_delim, _dtypes.bool) + na_value = _ops.convert_to_tensor(na_value, _dtypes.string) + select_cols = _ops.convert_to_tensor(select_cols, _dtypes.int64) + _inputs_flat = [filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols] + list(record_defaults) + _attrs = ("output_types", _attr_output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"ExperimentalCSVDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalCSVDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_choose_fastest_dataset(input_datasets: Annotated[List[Any], _atypes.Variant], num_experiments: int, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_datasets: A list of at least 2 `Tensor` objects with type `variant`. + num_experiments: An `int`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalChooseFastestDataset", name, input_datasets, + "num_experiments", num_experiments, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_choose_fastest_dataset_eager_fallback( + input_datasets, num_experiments=num_experiments, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + num_experiments = _execute.make_int(num_experiments, "num_experiments") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalChooseFastestDataset", input_datasets=input_datasets, + num_experiments=num_experiments, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "num_experiments", + _op._get_attr_int("num_experiments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalChooseFastestDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalChooseFastestDataset = tf_export("raw_ops.ExperimentalChooseFastestDataset")(_ops.to_raw_op(experimental_choose_fastest_dataset)) + + +def experimental_choose_fastest_dataset_eager_fallback(input_datasets: Annotated[List[Any], _atypes.Variant], num_experiments: int, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'input_datasets' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." % input_datasets) + _attr_N = len(input_datasets) + num_experiments = _execute.make_int(num_experiments, "num_experiments") + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_choose_fastest_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_datasets = _ops.convert_n_to_tensor(input_datasets, _dtypes.variant) + _inputs_flat = list(input_datasets) + _attrs = ("N", _attr_N, "num_experiments", num_experiments, "output_types", + output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalChooseFastestDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalChooseFastestDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_dataset_cardinality(input_dataset: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Int64]: + r"""Returns the cardinality of `input_dataset`. + + Returns the cardinality of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to return cardinality for. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalDatasetCardinality", name, input_dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_dataset_cardinality_eager_fallback( + input_dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalDatasetCardinality", input_dataset=input_dataset, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalDatasetCardinality", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalDatasetCardinality = tf_export("raw_ops.ExperimentalDatasetCardinality")(_ops.to_raw_op(experimental_dataset_cardinality)) + + +def experimental_dataset_cardinality_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Int64]: + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = None + _result = _execute.execute(b"ExperimentalDatasetCardinality", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalDatasetCardinality", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_dataset_to_tf_record(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], name=None): + r"""Writes the given dataset to the given file using the TFRecord format. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the dataset to write. + filename: A `Tensor` of type `string`. + A scalar string tensor representing the filename to use. + compression_type: A `Tensor` of type `string`. + A scalar string tensor containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalDatasetToTFRecord", name, input_dataset, filename, + compression_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_dataset_to_tf_record_eager_fallback( + input_dataset, filename, compression_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalDatasetToTFRecord", input_dataset=input_dataset, + filename=filename, + compression_type=compression_type, + name=name) + return _op +ExperimentalDatasetToTFRecord = tf_export("raw_ops.ExperimentalDatasetToTFRecord")(_ops.to_raw_op(experimental_dataset_to_tf_record)) + + +def experimental_dataset_to_tf_record_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], filename: Annotated[Any, _atypes.String], compression_type: Annotated[Any, _atypes.String], name, ctx): + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + filename = _ops.convert_to_tensor(filename, _dtypes.string) + compression_type = _ops.convert_to_tensor(compression_type, _dtypes.string) + _inputs_flat = [input_dataset, filename, compression_type] + _attrs = None + _result = _execute.execute(b"ExperimentalDatasetToTFRecord", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def experimental_dense_to_sparse_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], row_shape: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that batches input elements into a SparseTensor. 
+ + Args: + input_dataset: A `Tensor` of type `variant`. + A handle to an input dataset. Must have a single component. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. + row_shape: A `Tensor` of type `int64`. + A vector representing the dense shape of each row in the produced + SparseTensor. The shape may be partially specified, using `-1` to indicate + that a particular dimension should use the maximum size of all batch elements. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalDenseToSparseBatchDataset", name, input_dataset, + batch_size, row_shape, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_dense_to_sparse_batch_dataset_eager_fallback( + input_dataset, batch_size, row_shape, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_dense_to_sparse_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_dense_to_sparse_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalDenseToSparseBatchDataset", input_dataset=input_dataset, + batch_size=batch_size, + row_shape=row_shape, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalDenseToSparseBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalDenseToSparseBatchDataset = tf_export("raw_ops.ExperimentalDenseToSparseBatchDataset")(_ops.to_raw_op(experimental_dense_to_sparse_batch_dataset)) + + +def experimental_dense_to_sparse_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_size: Annotated[Any, _atypes.Int64], row_shape: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_dense_to_sparse_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_dense_to_sparse_batch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + row_shape = _ops.convert_to_tensor(row_shape, _dtypes.int64) + _inputs_flat = [input_dataset, batch_size, row_shape] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalDenseToSparseBatchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalDenseToSparseBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_directed_interleave_dataset(selector_input_dataset: Annotated[Any, _atypes.Variant], data_input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""A substitute for `InterleaveDataset` on a fixed list of `N` datasets. + + Args: + selector_input_dataset: A `Tensor` of type `variant`. + A dataset of scalar `DT_INT64` elements that determines which of the + `N` data inputs should produce the next output element. + data_input_datasets: A list of at least 1 `Tensor` objects with type `variant`. + `N` datasets with the same type that will be interleaved according to + the values of `selector_input_dataset`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalDirectedInterleaveDataset", name, + selector_input_dataset, data_input_datasets, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_directed_interleave_dataset_eager_fallback( + selector_input_dataset, data_input_datasets, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(data_input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'data_input_datasets' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." % data_input_datasets) + _attr_N = len(data_input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalDirectedInterleaveDataset", selector_input_dataset=selector_input_dataset, + data_input_datasets=data_input_datasets, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalDirectedInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalDirectedInterleaveDataset = tf_export("raw_ops.ExperimentalDirectedInterleaveDataset")(_ops.to_raw_op(experimental_directed_interleave_dataset)) + + +def experimental_directed_interleave_dataset_eager_fallback(selector_input_dataset: Annotated[Any, _atypes.Variant], data_input_datasets: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(data_input_datasets, (list, tuple)): + raise TypeError( + "Expected list for 'data_input_datasets' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." % data_input_datasets) + _attr_N = len(data_input_datasets) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_directed_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + selector_input_dataset = _ops.convert_to_tensor(selector_input_dataset, _dtypes.variant) + data_input_datasets = _ops.convert_n_to_tensor(data_input_datasets, _dtypes.variant) + _inputs_flat = [selector_input_dataset] + list(data_input_datasets) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, "N", + _attr_N) + _result = _execute.execute(b"ExperimentalDirectedInterleaveDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalDirectedInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_group_by_reducer_dataset(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments, key_func, init_func, reduce_func, finalize_func, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that computes a group-by on `input_dataset`. + + Creates a dataset that computes a group-by on `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + key_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `key_func`. + init_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `init_func`. + reduce_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `reduce_func`. + finalize_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `finalize_func`. 
+ key_func: A function decorated with @Defun. + A function mapping an element of `input_dataset`, concatenated + with `key_func_other_arguments` to a scalar value of type DT_INT64. + init_func: A function decorated with @Defun. + A function mapping a key of type DT_INT64, concatenated with + `init_func_other_arguments` to the initial reducer state. + reduce_func: A function decorated with @Defun. + A function mapping the current reducer state and an element of `input_dataset`, + concatenated with `reduce_func_other_arguments` to a new reducer state. + finalize_func: A function decorated with @Defun. + A function mapping the final reducer state to an output element. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalGroupByReducerDataset", name, input_dataset, + key_func_other_arguments, init_func_other_arguments, + reduce_func_other_arguments, finalize_func_other_arguments, + "key_func", key_func, "init_func", init_func, "reduce_func", + reduce_func, "finalize_func", finalize_func, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_group_by_reducer_dataset_eager_fallback( + input_dataset, key_func_other_arguments, init_func_other_arguments, + reduce_func_other_arguments, finalize_func_other_arguments, + key_func=key_func, init_func=init_func, reduce_func=reduce_func, + finalize_func=finalize_func, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except 
_core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_group_by_reducer_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_group_by_reducer_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalGroupByReducerDataset", input_dataset=input_dataset, + key_func_other_arguments=key_func_other_arguments, + init_func_other_arguments=init_func_other_arguments, + reduce_func_other_arguments=reduce_func_other_arguments, + finalize_func_other_arguments=finalize_func_other_arguments, + key_func=key_func, + init_func=init_func, + reduce_func=reduce_func, + finalize_func=finalize_func, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("key_func", _op.get_attr("key_func"), "init_func", + _op.get_attr("init_func"), "reduce_func", + _op.get_attr("reduce_func"), "finalize_func", + _op.get_attr("finalize_func"), "Tkey_func_other_arguments", + _op.get_attr("Tkey_func_other_arguments"), + "Tinit_func_other_arguments", + _op.get_attr("Tinit_func_other_arguments"), + "Treduce_func_other_arguments", + _op.get_attr("Treduce_func_other_arguments"), + "Tfinalize_func_other_arguments", + _op.get_attr("Tfinalize_func_other_arguments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalGroupByReducerDataset", _inputs_flat, _attrs, _result) + _result, = 
_result + return _result + +ExperimentalGroupByReducerDataset = tf_export("raw_ops.ExperimentalGroupByReducerDataset")(_ops.to_raw_op(experimental_group_by_reducer_dataset)) + + +def experimental_group_by_reducer_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments, key_func, init_func, reduce_func, finalize_func, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_group_by_reducer_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_group_by_reducer_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Tkey_func_other_arguments, key_func_other_arguments = _execute.convert_to_mixed_eager_tensors(key_func_other_arguments, ctx) + _attr_Tinit_func_other_arguments, init_func_other_arguments = _execute.convert_to_mixed_eager_tensors(init_func_other_arguments, ctx) + _attr_Treduce_func_other_arguments, reduce_func_other_arguments = _execute.convert_to_mixed_eager_tensors(reduce_func_other_arguments, ctx) + _attr_Tfinalize_func_other_arguments, finalize_func_other_arguments = _execute.convert_to_mixed_eager_tensors(finalize_func_other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(key_func_other_arguments) + list(init_func_other_arguments) + list(reduce_func_other_arguments) + list(finalize_func_other_arguments) + _attrs = ("key_func", key_func, "init_func", init_func, "reduce_func", + reduce_func, "finalize_func", finalize_func, 
"Tkey_func_other_arguments", + _attr_Tkey_func_other_arguments, "Tinit_func_other_arguments", + _attr_Tinit_func_other_arguments, "Treduce_func_other_arguments", + _attr_Treduce_func_other_arguments, "Tfinalize_func_other_arguments", + _attr_Tfinalize_func_other_arguments, "output_types", output_types, + "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalGroupByReducerDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalGroupByReducerDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_group_by_window_dataset(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments, key_func, reduce_func, window_size_func, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that computes a windowed group-by on `input_dataset`. + + // TODO(mrry): Support non-int64 keys. + + Args: + input_dataset: A `Tensor` of type `variant`. + key_func_other_arguments: A list of `Tensor` objects. + reduce_func_other_arguments: A list of `Tensor` objects. + window_size_func_other_arguments: A list of `Tensor` objects. + key_func: A function decorated with @Defun. + A function mapping an element of `input_dataset`, concatenated + with `key_func_other_arguments` to a scalar value of type DT_INT64. + reduce_func: A function decorated with @Defun. + window_size_func: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalGroupByWindowDataset", name, input_dataset, + key_func_other_arguments, reduce_func_other_arguments, + window_size_func_other_arguments, "key_func", key_func, "reduce_func", + reduce_func, "window_size_func", window_size_func, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_group_by_window_dataset_eager_fallback( + input_dataset, key_func_other_arguments, + reduce_func_other_arguments, window_size_func_other_arguments, + key_func=key_func, reduce_func=reduce_func, + window_size_func=window_size_func, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_group_by_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_group_by_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalGroupByWindowDataset", input_dataset=input_dataset, + key_func_other_arguments=key_func_other_arguments, + reduce_func_other_arguments=reduce_func_other_arguments, + window_size_func_other_arguments=window_size_func_other_arguments, + key_func=key_func, + reduce_func=reduce_func, + window_size_func=window_size_func, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("key_func", _op.get_attr("key_func"), "reduce_func", + _op.get_attr("reduce_func"), "window_size_func", + _op.get_attr("window_size_func"), "Tkey_func_other_arguments", + _op.get_attr("Tkey_func_other_arguments"), + "Treduce_func_other_arguments", + _op.get_attr("Treduce_func_other_arguments"), + "Twindow_size_func_other_arguments", + _op.get_attr("Twindow_size_func_other_arguments"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalGroupByWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalGroupByWindowDataset = tf_export("raw_ops.ExperimentalGroupByWindowDataset")(_ops.to_raw_op(experimental_group_by_window_dataset)) + + +def experimental_group_by_window_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments, key_func, reduce_func, window_size_func, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_group_by_window_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_group_by_window_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Tkey_func_other_arguments, key_func_other_arguments = _execute.convert_to_mixed_eager_tensors(key_func_other_arguments, ctx) + _attr_Treduce_func_other_arguments, reduce_func_other_arguments = _execute.convert_to_mixed_eager_tensors(reduce_func_other_arguments, ctx) + _attr_Twindow_size_func_other_arguments, window_size_func_other_arguments = _execute.convert_to_mixed_eager_tensors(window_size_func_other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(key_func_other_arguments) + list(reduce_func_other_arguments) + list(window_size_func_other_arguments) + _attrs = ("key_func", key_func, "reduce_func", reduce_func, + "window_size_func", window_size_func, "Tkey_func_other_arguments", + _attr_Tkey_func_other_arguments, "Treduce_func_other_arguments", + _attr_Treduce_func_other_arguments, "Twindow_size_func_other_arguments", + _attr_Twindow_size_func_other_arguments, "output_types", output_types, + "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalGroupByWindowDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalGroupByWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_ignore_errors_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, log_warning:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that contains the elements of `input_dataset` ignoring errors. 
+ + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + log_warning: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalIgnoreErrorsDataset", name, input_dataset, + "output_types", output_types, "output_shapes", output_shapes, + "log_warning", log_warning) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_ignore_errors_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, log_warning=log_warning, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_ignore_errors_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_ignore_errors_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if log_warning is None: + log_warning = False + log_warning = _execute.make_bool(log_warning, "log_warning") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalIgnoreErrorsDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, + log_warning=log_warning, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "log_warning", + _op._get_attr_bool("log_warning")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalIgnoreErrorsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalIgnoreErrorsDataset = tf_export("raw_ops.ExperimentalIgnoreErrorsDataset")(_ops.to_raw_op(experimental_ignore_errors_dataset)) + + +def experimental_ignore_errors_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, log_warning: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_ignore_errors_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_ignore_errors_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if log_warning is None: + log_warning = False + log_warning = _execute.make_bool(log_warning, "log_warning") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "log_warning", log_warning) + _result = _execute.execute(b"ExperimentalIgnoreErrorsDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalIgnoreErrorsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_iterator_get_device(resource: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]: + r"""Returns the name of the device on which `resource` has been placed. + + Args: + resource: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalIteratorGetDevice", name, resource) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_iterator_get_device_eager_fallback( + resource, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalIteratorGetDevice", resource=resource, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalIteratorGetDevice", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalIteratorGetDevice = tf_export("raw_ops.ExperimentalIteratorGetDevice")(_ops.to_raw_op(experimental_iterator_get_device)) + + +def experimental_iterator_get_device_eager_fallback(resource: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]: + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + _inputs_flat = [resource] + _attrs = None + _result = _execute.execute(b"ExperimentalIteratorGetDevice", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalIteratorGetDevice", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_lmdb_dataset(filenames: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + filenames: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalLMDBDataset", name, filenames, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_lmdb_dataset_eager_fallback( + filenames, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_lmdb_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_lmdb_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalLMDBDataset", filenames=filenames, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalLMDBDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalLMDBDataset = tf_export("raw_ops.ExperimentalLMDBDataset")(_ops.to_raw_op(experimental_lmdb_dataset)) + + +def experimental_lmdb_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_lmdb_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_lmdb_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + _inputs_flat = [filenames] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalLMDBDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalLMDBDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_latency_stats_dataset(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Records the latency of producing `input_dataset` elements in a StatsAggregator. + + Args: + input_dataset: A `Tensor` of type `variant`. + tag: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalLatencyStatsDataset", name, input_dataset, tag, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_latency_stats_dataset_eager_fallback( + input_dataset, tag, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_latency_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_latency_stats_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalLatencyStatsDataset", input_dataset=input_dataset, + tag=tag, output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalLatencyStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalLatencyStatsDataset = tf_export("raw_ops.ExperimentalLatencyStatsDataset")(_ops.to_raw_op(experimental_latency_stats_dataset)) + + +def experimental_latency_stats_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_latency_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_latency_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + _inputs_flat = [input_dataset, tag] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalLatencyStatsDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalLatencyStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_map_and_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], f, output_types, output_shapes, preserve_cardinality:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that fuses mapping with batching. + + Creates a dataset that applies `f` to the outputs of `input_dataset` and then + batches `batch_size` of them. + + Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + to `batch_size * num_parallel_batches` copies of `f` in parallel. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when building a closure + for `f`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. It determines the number of concurrent invocations of `f` that process + elements from `input_dataset` in parallel. + num_parallel_calls: A `Tensor` of type `int64`. + A scalar representing the maximum number of parallel invocations of the `map_fn` + function. 
Applying the `map_fn` on consecutive input elements in parallel has + the potential to improve input pipeline throughput. + drop_remainder: A `Tensor` of type `bool`. + A scalar representing whether the last batch should be dropped in case its size + is smaller than desired. + f: A function decorated with @Defun. + A function to apply to the outputs of `input_dataset`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalMapAndBatchDataset", name, input_dataset, + other_arguments, batch_size, num_parallel_calls, drop_remainder, "f", + f, "output_types", output_types, "output_shapes", output_shapes, + "preserve_cardinality", preserve_cardinality) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_map_and_batch_dataset_eager_fallback( + input_dataset, other_arguments, batch_size, num_parallel_calls, + drop_remainder, f=f, output_types=output_types, + output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_map_and_batch_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_map_and_batch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalMapAndBatchDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + batch_size=batch_size, + num_parallel_calls=num_parallel_calls, + drop_remainder=drop_remainder, f=f, + output_types=output_types, + output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalMapAndBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalMapAndBatchDataset = tf_export("raw_ops.ExperimentalMapAndBatchDataset")(_ops.to_raw_op(experimental_map_and_batch_dataset)) + + +def experimental_map_and_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], f, output_types, output_shapes, preserve_cardinality: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' 
argument to " + "'experimental_map_and_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_map_and_batch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset] + list(other_arguments) + [batch_size, num_parallel_calls, drop_remainder] + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "preserve_cardinality", + preserve_cardinality) + _result = _execute.execute(b"ExperimentalMapAndBatchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalMapAndBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_map_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, use_inter_op_parallelism:bool=True, preserve_cardinality:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. 
+ f: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + use_inter_op_parallelism: An optional `bool`. Defaults to `True`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalMapDataset", name, input_dataset, other_arguments, + "f", f, "output_types", output_types, "output_shapes", output_shapes, + "use_inter_op_parallelism", use_inter_op_parallelism, + "preserve_cardinality", preserve_cardinality) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_map_dataset_eager_fallback( + input_dataset, other_arguments, f=f, output_types=output_types, + output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + preserve_cardinality=preserve_cardinality, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_map_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_map_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalMapDataset", input_dataset=input_dataset, + other_arguments=other_arguments, f=f, + output_types=output_types, + output_shapes=output_shapes, + use_inter_op_parallelism=use_inter_op_parallelism, + preserve_cardinality=preserve_cardinality, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "use_inter_op_parallelism", + _op._get_attr_bool("use_inter_op_parallelism"), + "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalMapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalMapDataset = tf_export("raw_ops.ExperimentalMapDataset")(_ops.to_raw_op(experimental_map_dataset)) + + +def experimental_map_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, f, output_types, output_shapes, use_inter_op_parallelism: bool, preserve_cardinality: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_map_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_map_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_inter_op_parallelism is None: + use_inter_op_parallelism = True + use_inter_op_parallelism = _execute.make_bool(use_inter_op_parallelism, "use_inter_op_parallelism") + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "use_inter_op_parallelism", + use_inter_op_parallelism, "preserve_cardinality", preserve_cardinality) + _result = _execute.execute(b"ExperimentalMapDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalMapDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_matching_files_dataset(patterns: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + patterns: A `Tensor` of type `string`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalMatchingFilesDataset", name, patterns) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_matching_files_dataset_eager_fallback( + patterns, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalMatchingFilesDataset", patterns=patterns, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalMatchingFilesDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalMatchingFilesDataset = tf_export("raw_ops.ExperimentalMatchingFilesDataset")(_ops.to_raw_op(experimental_matching_files_dataset)) + + +def experimental_matching_files_dataset_eager_fallback(patterns: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Variant]: + patterns = _ops.convert_to_tensor(patterns, _dtypes.string) + _inputs_flat = [patterns] + _attrs = None + _result = _execute.execute(b"ExperimentalMatchingFilesDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalMatchingFilesDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_max_intra_op_parallelism_dataset(input_dataset: Annotated[Any, _atypes.Variant], max_intra_op_parallelism: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that overrides the maximum intra-op parallelism. 
+ + Args: + input_dataset: A `Tensor` of type `variant`. + max_intra_op_parallelism: A `Tensor` of type `int64`. + Identifies the maximum intra-op parallelism to use. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalMaxIntraOpParallelismDataset", name, input_dataset, + max_intra_op_parallelism, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_max_intra_op_parallelism_dataset_eager_fallback( + input_dataset, max_intra_op_parallelism, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_max_intra_op_parallelism_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_max_intra_op_parallelism_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalMaxIntraOpParallelismDataset", input_dataset=input_dataset, + max_intra_op_parallelism=max_intra_op_parallelism, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalMaxIntraOpParallelismDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalMaxIntraOpParallelismDataset = tf_export("raw_ops.ExperimentalMaxIntraOpParallelismDataset")(_ops.to_raw_op(experimental_max_intra_op_parallelism_dataset)) + + +def experimental_max_intra_op_parallelism_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], max_intra_op_parallelism: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_max_intra_op_parallelism_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_max_intra_op_parallelism_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + max_intra_op_parallelism = _ops.convert_to_tensor(max_intra_op_parallelism, _dtypes.int64) + _inputs_flat = [input_dataset, max_intra_op_parallelism] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalMaxIntraOpParallelismDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalMaxIntraOpParallelismDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_non_serializable_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalNonSerializableDataset", name, input_dataset, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_non_serializable_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_non_serializable_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_non_serializable_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalNonSerializableDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalNonSerializableDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalNonSerializableDataset = tf_export("raw_ops.ExperimentalNonSerializableDataset")(_ops.to_raw_op(experimental_non_serializable_dataset)) + + +def experimental_non_serializable_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_non_serializable_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_non_serializable_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalNonSerializableDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalNonSerializableDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_parallel_interleave_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], sloppy: Annotated[Any, _atypes.Bool], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + The resulting dataset is similar to the `InterleaveDataset`, with the exception + that if retrieving the next value from a dataset would cause the requester to + block, it will skip that input dataset. This dataset is especially useful + when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it + allows the training step to proceed so long as some data is available. + + !! WARNING !! This dataset is not deterministic! + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + cycle_length: A `Tensor` of type `int64`. + block_length: A `Tensor` of type `int64`. + sloppy: A `Tensor` of type `bool`. + buffer_output_elements: A `Tensor` of type `int64`. + prefetch_input_elements: A `Tensor` of type `int64`. + f: A function decorated with @Defun. 
+ A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalParallelInterleaveDataset", name, input_dataset, + other_arguments, cycle_length, block_length, sloppy, + buffer_output_elements, prefetch_input_elements, "f", f, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_parallel_interleave_dataset_eager_fallback( + input_dataset, other_arguments, cycle_length, block_length, sloppy, + buffer_output_elements, prefetch_input_elements, f=f, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_parallel_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_parallel_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalParallelInterleaveDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + cycle_length=cycle_length, + block_length=block_length, + sloppy=sloppy, + buffer_output_elements=buffer_output_elements, + prefetch_input_elements=prefetch_input_elements, + f=f, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalParallelInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalParallelInterleaveDataset = tf_export("raw_ops.ExperimentalParallelInterleaveDataset")(_ops.to_raw_op(experimental_parallel_interleave_dataset)) + + +def experimental_parallel_interleave_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], sloppy: Annotated[Any, _atypes.Bool], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_parallel_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_parallel_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64) + block_length = _ops.convert_to_tensor(block_length, _dtypes.int64) + sloppy = _ops.convert_to_tensor(sloppy, _dtypes.bool) + buffer_output_elements = _ops.convert_to_tensor(buffer_output_elements, _dtypes.int64) + prefetch_input_elements = _ops.convert_to_tensor(prefetch_input_elements, _dtypes.int64) + _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements] + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalParallelInterleaveDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalParallelInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_parse_example_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, dense_keys, sparse_types, dense_shapes, output_types, output_shapes, sloppy:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_parallel_calls: A `Tensor` of type `int64`. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A dict mapping string keys to `Tensor`s. 
+ The keys of the dict must match the dense_keys of the feature. + sparse_keys: A list of `strings`. + A list of string keys in the examples features. + The results for these keys will be returned as `SparseTensor` objects. + dense_keys: A list of `strings`. + A list of Ndense string Tensors (scalars). + The keys expected in the Examples features associated with dense values. + sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `DTypes` of the same length as `sparse_keys`. + Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + and `tf.string` (`BytesList`) are supported. + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + List of tuples with the same length as `dense_keys`. + The shape of the data for each dense feature referenced by `dense_keys`. + Required for any input tensors identified by `dense_keys`. Must be + either fully defined, or may contain an unknown first dimension. + An unknown first dimension means the feature is treated as having + a variable number of blocks, and the output shape along this dimension + is considered unknown at graph build time. Padding is applied for + minibatch elements smaller than the maximum number of blocks for the + given feature along this dimension. + output_types: A list of `tf.DTypes` that has length `>= 1`. + The type list for the return values. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + The list of shapes being produced. + sloppy: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalParseExampleDataset", name, input_dataset, + num_parallel_calls, dense_defaults, "sparse_keys", sparse_keys, + "dense_keys", dense_keys, "sparse_types", sparse_types, + "dense_shapes", dense_shapes, "output_types", output_types, + "output_shapes", output_shapes, "sloppy", sloppy) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_parse_example_dataset_eager_fallback( + input_dataset, num_parallel_calls, dense_defaults, + sparse_keys=sparse_keys, dense_keys=dense_keys, + sparse_types=sparse_types, dense_shapes=dense_shapes, + output_types=output_types, output_shapes=output_shapes, + sloppy=sloppy, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'experimental_parse_example_dataset' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'experimental_parse_example_dataset' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'experimental_parse_example_dataset' Op, not %r." 
% sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'experimental_parse_example_dataset' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_parse_example_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_parse_example_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if sloppy is None: + sloppy = False + sloppy = _execute.make_bool(sloppy, "sloppy") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalParseExampleDataset", input_dataset=input_dataset, + num_parallel_calls=num_parallel_calls, + dense_defaults=dense_defaults, + sparse_keys=sparse_keys, + dense_keys=dense_keys, + sparse_types=sparse_types, + dense_shapes=dense_shapes, + output_types=output_types, + output_shapes=output_shapes, + sloppy=sloppy, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("sparse_keys", _op.get_attr("sparse_keys"), "dense_keys", + _op.get_attr("dense_keys"), "sparse_types", + _op.get_attr("sparse_types"), "Tdense", _op.get_attr("Tdense"), + "dense_shapes", _op.get_attr("dense_shapes"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "sloppy", + _op._get_attr_bool("sloppy")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalParseExampleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + 
+ExperimentalParseExampleDataset = tf_export("raw_ops.ExperimentalParseExampleDataset")(_ops.to_raw_op(experimental_parse_example_dataset)) + + +def experimental_parse_example_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, dense_keys, sparse_types, dense_shapes, output_types, output_shapes, sloppy: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'experimental_parse_example_dataset' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'experimental_parse_example_dataset' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'experimental_parse_example_dataset' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'experimental_parse_example_dataset' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_parse_example_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_parse_example_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if sloppy is None: + sloppy = False + sloppy = _execute.make_bool(sloppy, "sloppy") + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + _inputs_flat = [input_dataset, num_parallel_calls] + list(dense_defaults) + _attrs = ("sparse_keys", sparse_keys, "dense_keys", dense_keys, + "sparse_types", sparse_types, "Tdense", _attr_Tdense, "dense_shapes", + dense_shapes, "output_types", output_types, "output_shapes", output_shapes, + "sloppy", sloppy) + _result = _execute.execute(b"ExperimentalParseExampleDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalParseExampleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_private_thread_pool_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_threads: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_threads: A `Tensor` of type `int64`. + Identifies the number of threads to use for the private threadpool. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalPrivateThreadPoolDataset", name, input_dataset, + num_threads, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_private_thread_pool_dataset_eager_fallback( + input_dataset, num_threads, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_private_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_private_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalPrivateThreadPoolDataset", input_dataset=input_dataset, + num_threads=num_threads, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalPrivateThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalPrivateThreadPoolDataset = tf_export("raw_ops.ExperimentalPrivateThreadPoolDataset")(_ops.to_raw_op(experimental_private_thread_pool_dataset)) + + +def experimental_private_thread_pool_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_threads: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_private_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_private_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_threads = _ops.convert_to_tensor(num_threads, _dtypes.int64) + _inputs_flat = [input_dataset, num_threads] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalPrivateThreadPoolDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalPrivateThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_random_dataset(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a Dataset that returns pseudorandom numbers. + + Args: + seed: A `Tensor` of type `int64`. + A scalar seed for the random number generator. If either seed or + seed2 is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used. + seed2: A `Tensor` of type `int64`. + A second scalar seed to avoid seed collision. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalRandomDataset", name, seed, seed2, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_random_dataset_eager_fallback( + seed, seed2, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_random_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_random_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalRandomDataset", seed=seed, seed2=seed2, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalRandomDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalRandomDataset = tf_export("raw_ops.ExperimentalRandomDataset")(_ops.to_raw_op(experimental_random_dataset)) + + +def experimental_random_dataset_eager_fallback(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_random_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_random_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + _inputs_flat = [seed, seed2] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalRandomDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalRandomDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_rebatch_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_replicas: Annotated[Any, _atypes.Int64], output_types, output_shapes, use_fallback:bool=True, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that changes the batch size. + + Creates a dataset that changes the batch size of the dataset to current batch + size // num_replicas. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + num_replicas: A `Tensor` of type `int64`. + A scalar representing the number of replicas to distribute this batch across. As + a result of this transformation the current batch size would end up being + divided by this parameter. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + use_fallback: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalRebatchDataset", name, input_dataset, num_replicas, + "output_types", output_types, "output_shapes", output_shapes, + "use_fallback", use_fallback) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_rebatch_dataset_eager_fallback( + input_dataset, num_replicas, output_types=output_types, + output_shapes=output_shapes, use_fallback=use_fallback, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_rebatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_rebatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_fallback is None: + use_fallback = True + use_fallback = _execute.make_bool(use_fallback, "use_fallback") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalRebatchDataset", input_dataset=input_dataset, + num_replicas=num_replicas, + output_types=output_types, + output_shapes=output_shapes, + use_fallback=use_fallback, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "use_fallback", + _op._get_attr_bool("use_fallback")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalRebatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalRebatchDataset = tf_export("raw_ops.ExperimentalRebatchDataset")(_ops.to_raw_op(experimental_rebatch_dataset)) + + +def experimental_rebatch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_replicas: Annotated[Any, _atypes.Int64], output_types, output_shapes, use_fallback: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_rebatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_rebatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_fallback is None: + use_fallback = True + use_fallback = _execute.make_bool(use_fallback, "use_fallback") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_replicas = _ops.convert_to_tensor(num_replicas, _dtypes.int64) + _inputs_flat = [input_dataset, num_replicas] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "use_fallback", use_fallback) + _result = _execute.execute(b"ExperimentalRebatchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalRebatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_scan_dataset(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, preserve_cardinality:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset successively reduces `f` over the elements of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + initial_state: A list of `Tensor` objects. + other_arguments: A list of `Tensor` objects. + f: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalScanDataset", name, input_dataset, initial_state, + other_arguments, "f", f, "output_types", output_types, + "output_shapes", output_shapes, "preserve_cardinality", + preserve_cardinality) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_scan_dataset_eager_fallback( + input_dataset, initial_state, other_arguments, f=f, + output_types=output_types, output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_scan_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_scan_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalScanDataset", input_dataset=input_dataset, + initial_state=initial_state, + other_arguments=other_arguments, f=f, + output_types=output_types, + output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Tstate", _op.get_attr("Tstate"), + "Targuments", _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalScanDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalScanDataset = tf_export("raw_ops.ExperimentalScanDataset")(_ops.to_raw_op(experimental_scan_dataset)) + + +def experimental_scan_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, preserve_cardinality: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_scan_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_scan_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + _attr_Tstate, initial_state = _execute.convert_to_mixed_eager_tensors(initial_state, ctx) + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(initial_state) + list(other_arguments) + _attrs = ("f", f, "Tstate", _attr_Tstate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes, + "preserve_cardinality", preserve_cardinality) + _result = _execute.execute(b"ExperimentalScanDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalScanDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_set_stats_aggregator_dataset(input_dataset: Annotated[Any, _atypes.Variant], stats_aggregator: Annotated[Any, _atypes.Resource], tag: Annotated[Any, _atypes.String], counter_prefix: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + stats_aggregator: A `Tensor` of type `resource`. + tag: A `Tensor` of type `string`. + counter_prefix: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalSetStatsAggregatorDataset", name, input_dataset, + stats_aggregator, tag, counter_prefix, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_set_stats_aggregator_dataset_eager_fallback( + input_dataset, stats_aggregator, tag, counter_prefix, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_set_stats_aggregator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_set_stats_aggregator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalSetStatsAggregatorDataset", input_dataset=input_dataset, + stats_aggregator=stats_aggregator, + tag=tag, + counter_prefix=counter_prefix, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalSetStatsAggregatorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalSetStatsAggregatorDataset = tf_export("raw_ops.ExperimentalSetStatsAggregatorDataset")(_ops.to_raw_op(experimental_set_stats_aggregator_dataset)) + + +def experimental_set_stats_aggregator_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], stats_aggregator: Annotated[Any, _atypes.Resource], tag: Annotated[Any, _atypes.String], counter_prefix: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_set_stats_aggregator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_set_stats_aggregator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + stats_aggregator = _ops.convert_to_tensor(stats_aggregator, _dtypes.resource) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + counter_prefix = _ops.convert_to_tensor(counter_prefix, _dtypes.string) + _inputs_flat = [input_dataset, stats_aggregator, tag, counter_prefix] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalSetStatsAggregatorDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalSetStatsAggregatorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_sleep_dataset(input_dataset: Annotated[Any, _atypes.Variant], sleep_microseconds: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + sleep_microseconds: A `Tensor` of type `int64`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalSleepDataset", name, input_dataset, + sleep_microseconds, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_sleep_dataset_eager_fallback( + input_dataset, sleep_microseconds, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sleep_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sleep_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalSleepDataset", input_dataset=input_dataset, + sleep_microseconds=sleep_microseconds, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalSleepDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalSleepDataset = tf_export("raw_ops.ExperimentalSleepDataset")(_ops.to_raw_op(experimental_sleep_dataset)) + + +def experimental_sleep_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], sleep_microseconds: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sleep_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sleep_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + sleep_microseconds = _ops.convert_to_tensor(sleep_microseconds, _dtypes.int64) + _inputs_flat = [input_dataset, sleep_microseconds] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalSleepDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalSleepDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_sliding_window_dataset(input_dataset: Annotated[Any, _atypes.Variant], window_size: Annotated[Any, _atypes.Int64], window_shift: Annotated[Any, _atypes.Int64], window_stride: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that passes a sliding window over `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + window_size: A `Tensor` of type `int64`. + A scalar representing the number of elements in the + sliding window. + window_shift: A `Tensor` of type `int64`. + A scalar representing the steps moving the sliding window + forward in one iteration. It must be positive. + window_stride: A `Tensor` of type `int64`. + A scalar representing the stride of the input elements of the sliding window. + It must be positive. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalSlidingWindowDataset", name, input_dataset, + window_size, window_shift, window_stride, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_sliding_window_dataset_eager_fallback( + input_dataset, window_size, window_shift, window_stride, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sliding_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sliding_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalSlidingWindowDataset", input_dataset=input_dataset, + window_size=window_size, + window_shift=window_shift, + window_stride=window_stride, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalSlidingWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalSlidingWindowDataset = tf_export("raw_ops.ExperimentalSlidingWindowDataset")(_ops.to_raw_op(experimental_sliding_window_dataset)) + + +def experimental_sliding_window_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], window_size: Annotated[Any, _atypes.Int64], window_shift: Annotated[Any, _atypes.Int64], window_stride: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sliding_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sliding_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + window_size = _ops.convert_to_tensor(window_size, _dtypes.int64) + window_shift = _ops.convert_to_tensor(window_shift, _dtypes.int64) + window_stride = _ops.convert_to_tensor(window_stride, _dtypes.int64) + _inputs_flat = [input_dataset, window_size, window_shift, window_stride] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalSlidingWindowDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalSlidingWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_sql_dataset(driver_name: Annotated[Any, _atypes.String], data_source_name: Annotated[Any, _atypes.String], query: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that executes a SQL query and emits rows of the result set. + + Args: + driver_name: A `Tensor` of type `string`. + The database type. Currently, the only supported type is 'sqlite'. + data_source_name: A `Tensor` of type `string`. + A connection string to connect to the database. + query: A `Tensor` of type `string`. A SQL query to execute. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalSqlDataset", name, driver_name, data_source_name, + query, "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_sql_dataset_eager_fallback( + driver_name, data_source_name, query, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sql_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sql_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalSqlDataset", driver_name=driver_name, + data_source_name=data_source_name, + query=query, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalSqlDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalSqlDataset = tf_export("raw_ops.ExperimentalSqlDataset")(_ops.to_raw_op(experimental_sql_dataset)) + + +def experimental_sql_dataset_eager_fallback(driver_name: Annotated[Any, _atypes.String], data_source_name: Annotated[Any, _atypes.String], query: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_sql_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_sql_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + driver_name = _ops.convert_to_tensor(driver_name, _dtypes.string) + data_source_name = _ops.convert_to_tensor(data_source_name, _dtypes.string) + query = _ops.convert_to_tensor(query, _dtypes.string) + _inputs_flat = [driver_name, data_source_name, query] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalSqlDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalSqlDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_stats_aggregator_handle(container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a statistics manager resource. + + Args: + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalStatsAggregatorHandle", name, "container", + container, "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_stats_aggregator_handle_eager_fallback( + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalStatsAggregatorHandle", container=container, + shared_name=shared_name, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalStatsAggregatorHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalStatsAggregatorHandle = tf_export("raw_ops.ExperimentalStatsAggregatorHandle")(_ops.to_raw_op(experimental_stats_aggregator_handle)) + + +def experimental_stats_aggregator_handle_eager_fallback(container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("container", container, "shared_name", shared_name) + _result = _execute.execute(b"ExperimentalStatsAggregatorHandle", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalStatsAggregatorHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_stats_aggregator_summary(iterator: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]: + r"""Produces a summary of any statistics recorded by the given statistics manager. + + Args: + iterator: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalStatsAggregatorSummary", name, iterator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_stats_aggregator_summary_eager_fallback( + iterator, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalStatsAggregatorSummary", iterator=iterator, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalStatsAggregatorSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalStatsAggregatorSummary = tf_export("raw_ops.ExperimentalStatsAggregatorSummary")(_ops.to_raw_op(experimental_stats_aggregator_summary)) + + +def experimental_stats_aggregator_summary_eager_fallback(iterator: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]: + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [iterator] + _attrs = None + _result = _execute.execute(b"ExperimentalStatsAggregatorSummary", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalStatsAggregatorSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_take_while_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that stops iteration when predicate` is false. 
+ + The `predicate` function must return a scalar boolean and accept the + following arguments: + + * One tensor for each component of an element of `input_dataset`. + * One tensor for each value in `other_arguments`. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `predicate`. + predicate: A function decorated with @Defun. + A function returning a scalar boolean. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalTakeWhileDataset", name, input_dataset, + other_arguments, "predicate", predicate, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_take_while_dataset_eager_fallback( + input_dataset, other_arguments, predicate=predicate, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_take_while_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_take_while_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalTakeWhileDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + predicate=predicate, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("predicate", _op.get_attr("predicate"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalTakeWhileDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalTakeWhileDataset = tf_export("raw_ops.ExperimentalTakeWhileDataset")(_ops.to_raw_op(experimental_take_while_dataset)) + + +def experimental_take_while_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_take_while_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_take_while_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("predicate", predicate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalTakeWhileDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalTakeWhileDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_thread_pool_dataset(input_dataset: Annotated[Any, _atypes.Variant], thread_pool: Annotated[Any, _atypes.Resource], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + thread_pool: A `Tensor` of type `resource`. + A resource produced by the ThreadPoolHandle op. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalThreadPoolDataset", name, input_dataset, + thread_pool, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_thread_pool_dataset_eager_fallback( + input_dataset, thread_pool, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalThreadPoolDataset", input_dataset=input_dataset, + thread_pool=thread_pool, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalThreadPoolDataset = tf_export("raw_ops.ExperimentalThreadPoolDataset")(_ops.to_raw_op(experimental_thread_pool_dataset)) + + +def experimental_thread_pool_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], thread_pool: Annotated[Any, _atypes.Resource], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + thread_pool = _ops.convert_to_tensor(thread_pool, _dtypes.resource) + _inputs_flat = [input_dataset, thread_pool] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalThreadPoolDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_thread_pool_handle(num_threads: int, display_name: str, max_intra_op_parallelism:int=1, container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`. + + Args: + num_threads: An `int`. The number of threads in the thread pool. + display_name: A `string`. + A human-readable name for the threads that may be visible in some + visualizations. + threadpool. + max_intra_op_parallelism: An optional `int`. Defaults to `1`. + The maximum degree of parallelism to use within operations that execute on this + threadpool. + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalThreadPoolHandle", name, "num_threads", + num_threads, "max_intra_op_parallelism", max_intra_op_parallelism, + "display_name", display_name, "container", container, "shared_name", + shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_thread_pool_handle_eager_fallback( + num_threads=num_threads, + max_intra_op_parallelism=max_intra_op_parallelism, + display_name=display_name, container=container, + shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_threads = _execute.make_int(num_threads, "num_threads") + display_name = _execute.make_str(display_name, "display_name") + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalThreadPoolHandle", num_threads=num_threads, + display_name=display_name, + max_intra_op_parallelism=max_intra_op_parallelism, + container=container, + shared_name=shared_name, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_threads", _op._get_attr_int("num_threads"), + "max_intra_op_parallelism", + _op._get_attr_int("max_intra_op_parallelism"), "display_name", + _op.get_attr("display_name"), "container", + _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = 
_op.inputs + _execute.record_gradient( + "ExperimentalThreadPoolHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalThreadPoolHandle = tf_export("raw_ops.ExperimentalThreadPoolHandle")(_ops.to_raw_op(experimental_thread_pool_handle)) + + +def experimental_thread_pool_handle_eager_fallback(num_threads: int, display_name: str, max_intra_op_parallelism: int, container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + num_threads = _execute.make_int(num_threads, "num_threads") + display_name = _execute.make_str(display_name, "display_name") + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("num_threads", num_threads, "max_intra_op_parallelism", + max_intra_op_parallelism, "display_name", display_name, "container", + container, "shared_name", shared_name) + _result = _execute.execute(b"ExperimentalThreadPoolHandle", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalThreadPoolHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_unbatch_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""A dataset that splits the elements of its input into multiple elements. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). 
+ + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalUnbatchDataset", name, input_dataset, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_unbatch_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_unbatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_unbatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalUnbatchDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalUnbatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalUnbatchDataset = tf_export("raw_ops.ExperimentalUnbatchDataset")(_ops.to_raw_op(experimental_unbatch_dataset)) + + +def experimental_unbatch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_unbatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_unbatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalUnbatchDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalUnbatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def experimental_unique_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that contains the unique elements of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ExperimentalUniqueDataset", name, input_dataset, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return experimental_unique_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_unique_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_unique_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ExperimentalUniqueDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ExperimentalUniqueDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ExperimentalUniqueDataset = tf_export("raw_ops.ExperimentalUniqueDataset")(_ops.to_raw_op(experimental_unique_dataset)) + + +def experimental_unique_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'experimental_unique_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'experimental_unique_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ExperimentalUniqueDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ExperimentalUniqueDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def get_element_at_index(dataset: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None): + r"""Gets the element at the specified index in a dataset. + + Args: + dataset: A `Tensor` of type `variant`. + index: A `Tensor` of type `int64`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GetElementAtIndex", name, dataset, index, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return get_element_at_index_eager_fallback( + dataset, index, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'get_element_at_index' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'get_element_at_index' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GetElementAtIndex", dataset=dataset, index=index, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "GetElementAtIndex", _inputs_flat, _attrs, _result) + return _result + +GetElementAtIndex = tf_export("raw_ops.GetElementAtIndex")(_ops.to_raw_op(get_element_at_index)) + + +def get_element_at_index_eager_fallback(dataset: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'get_element_at_index' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'get_element_at_index' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + index = _ops.convert_to_tensor(index, _dtypes.int64) + _inputs_flat = [dataset, index] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"GetElementAtIndex", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GetElementAtIndex", _inputs_flat, _attrs, _result) + return _result + + +def group_by_reducer_dataset(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments, key_func, init_func, reduce_func, finalize_func, output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that computes a group-by on `input_dataset`. + + Creates a dataset that computes a group-by on `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + key_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `key_func`. + init_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `init_func`. + reduce_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `reduce_func`. + finalize_func_other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `finalize_func`. + key_func: A function decorated with @Defun. + A function mapping an element of `input_dataset`, concatenated + with `key_func_other_arguments` to a scalar value of type DT_INT64. 
+ init_func: A function decorated with @Defun. + A function mapping a key of type DT_INT64, concatenated with + `init_func_other_arguments` to the initial reducer state. + reduce_func: A function decorated with @Defun. + A function mapping the current reducer state and an element of `input_dataset`, + concatenated with `reduce_func_other_arguments` to a new reducer state. + finalize_func: A function decorated with @Defun. + A function mapping the final reducer state to an output element. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GroupByReducerDataset", name, input_dataset, + key_func_other_arguments, init_func_other_arguments, + reduce_func_other_arguments, finalize_func_other_arguments, + "key_func", key_func, "init_func", init_func, "reduce_func", + reduce_func, "finalize_func", finalize_func, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return group_by_reducer_dataset_eager_fallback( + input_dataset, key_func_other_arguments, init_func_other_arguments, + reduce_func_other_arguments, finalize_func_other_arguments, + key_func=key_func, init_func=init_func, reduce_func=reduce_func, + finalize_func=finalize_func, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'group_by_reducer_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'group_by_reducer_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GroupByReducerDataset", input_dataset=input_dataset, + key_func_other_arguments=key_func_other_arguments, + init_func_other_arguments=init_func_other_arguments, + reduce_func_other_arguments=reduce_func_other_arguments, + finalize_func_other_arguments=finalize_func_other_arguments, + key_func=key_func, init_func=init_func, + reduce_func=reduce_func, + finalize_func=finalize_func, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("key_func", _op.get_attr("key_func"), "init_func", + _op.get_attr("init_func"), "reduce_func", + _op.get_attr("reduce_func"), "finalize_func", + _op.get_attr("finalize_func"), "Tkey_func_other_arguments", + _op.get_attr("Tkey_func_other_arguments"), + "Tinit_func_other_arguments", + _op.get_attr("Tinit_func_other_arguments"), + "Treduce_func_other_arguments", + _op.get_attr("Treduce_func_other_arguments"), + "Tfinalize_func_other_arguments", + _op.get_attr("Tfinalize_func_other_arguments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "GroupByReducerDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +GroupByReducerDataset = tf_export("raw_ops.GroupByReducerDataset")(_ops.to_raw_op(group_by_reducer_dataset)) + + +def 
group_by_reducer_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments, key_func, init_func, reduce_func, finalize_func, output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'group_by_reducer_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'group_by_reducer_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _attr_Tkey_func_other_arguments, key_func_other_arguments = _execute.convert_to_mixed_eager_tensors(key_func_other_arguments, ctx) + _attr_Tinit_func_other_arguments, init_func_other_arguments = _execute.convert_to_mixed_eager_tensors(init_func_other_arguments, ctx) + _attr_Treduce_func_other_arguments, reduce_func_other_arguments = _execute.convert_to_mixed_eager_tensors(reduce_func_other_arguments, ctx) + _attr_Tfinalize_func_other_arguments, finalize_func_other_arguments = _execute.convert_to_mixed_eager_tensors(finalize_func_other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(key_func_other_arguments) + list(init_func_other_arguments) + list(reduce_func_other_arguments) + list(finalize_func_other_arguments) + _attrs = ("key_func", key_func, "init_func", init_func, "reduce_func", + reduce_func, "finalize_func", finalize_func, "Tkey_func_other_arguments", + _attr_Tkey_func_other_arguments, "Tinit_func_other_arguments", + _attr_Tinit_func_other_arguments, "Treduce_func_other_arguments", + _attr_Treduce_func_other_arguments, 
"Tfinalize_func_other_arguments", + _attr_Tfinalize_func_other_arguments, "output_types", output_types, + "output_shapes", output_shapes) + _result = _execute.execute(b"GroupByReducerDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GroupByReducerDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def group_by_window_dataset(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments, key_func, reduce_func, window_size_func, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that computes a windowed group-by on `input_dataset`. + + // TODO(mrry): Support non-int64 keys. + + Args: + input_dataset: A `Tensor` of type `variant`. + key_func_other_arguments: A list of `Tensor` objects. + reduce_func_other_arguments: A list of `Tensor` objects. + window_size_func_other_arguments: A list of `Tensor` objects. + key_func: A function decorated with @Defun. + A function mapping an element of `input_dataset`, concatenated + with `key_func_other_arguments` to a scalar value of type DT_INT64. + reduce_func: A function decorated with @Defun. + window_size_func: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "GroupByWindowDataset", name, input_dataset, + key_func_other_arguments, reduce_func_other_arguments, + window_size_func_other_arguments, "key_func", key_func, "reduce_func", + reduce_func, "window_size_func", window_size_func, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return group_by_window_dataset_eager_fallback( + input_dataset, key_func_other_arguments, + reduce_func_other_arguments, window_size_func_other_arguments, + key_func=key_func, reduce_func=reduce_func, + window_size_func=window_size_func, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'group_by_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'group_by_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "GroupByWindowDataset", input_dataset=input_dataset, + key_func_other_arguments=key_func_other_arguments, + reduce_func_other_arguments=reduce_func_other_arguments, + window_size_func_other_arguments=window_size_func_other_arguments, + key_func=key_func, reduce_func=reduce_func, + window_size_func=window_size_func, + output_types=output_types, + output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("key_func", _op.get_attr("key_func"), "reduce_func", + _op.get_attr("reduce_func"), "window_size_func", + _op.get_attr("window_size_func"), "Tkey_func_other_arguments", + _op.get_attr("Tkey_func_other_arguments"), + "Treduce_func_other_arguments", + _op.get_attr("Treduce_func_other_arguments"), + "Twindow_size_func_other_arguments", + _op.get_attr("Twindow_size_func_other_arguments"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "GroupByWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +GroupByWindowDataset = tf_export("raw_ops.GroupByWindowDataset")(_ops.to_raw_op(group_by_window_dataset)) + + +def group_by_window_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments, key_func, reduce_func, window_size_func, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'group_by_window_dataset' Op, not 
%r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'group_by_window_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Tkey_func_other_arguments, key_func_other_arguments = _execute.convert_to_mixed_eager_tensors(key_func_other_arguments, ctx) + _attr_Treduce_func_other_arguments, reduce_func_other_arguments = _execute.convert_to_mixed_eager_tensors(reduce_func_other_arguments, ctx) + _attr_Twindow_size_func_other_arguments, window_size_func_other_arguments = _execute.convert_to_mixed_eager_tensors(window_size_func_other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(key_func_other_arguments) + list(reduce_func_other_arguments) + list(window_size_func_other_arguments) + _attrs = ("key_func", key_func, "reduce_func", reduce_func, + "window_size_func", window_size_func, "Tkey_func_other_arguments", + _attr_Tkey_func_other_arguments, "Treduce_func_other_arguments", + _attr_Treduce_func_other_arguments, "Twindow_size_func_other_arguments", + _attr_Twindow_size_func_other_arguments, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"GroupByWindowDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "GroupByWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def ignore_errors_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, log_warning:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that 
contains the elements of `input_dataset` ignoring errors. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + log_warning: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IgnoreErrorsDataset", name, input_dataset, "output_types", + output_types, "output_shapes", output_shapes, "log_warning", + log_warning) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return ignore_errors_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, log_warning=log_warning, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'ignore_errors_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'ignore_errors_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if log_warning is None: + log_warning = False + log_warning = _execute.make_bool(log_warning, "log_warning") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IgnoreErrorsDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, + log_warning=log_warning, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "log_warning", + _op._get_attr_bool("log_warning")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IgnoreErrorsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IgnoreErrorsDataset = tf_export("raw_ops.IgnoreErrorsDataset")(_ops.to_raw_op(ignore_errors_dataset)) + + +def ignore_errors_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, log_warning: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'ignore_errors_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'ignore_errors_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if log_warning is None: + log_warning = False + log_warning = _execute.make_bool(log_warning, "log_warning") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "log_warning", log_warning) + _result = _execute.execute(b"IgnoreErrorsDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IgnoreErrorsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def initialize_table_from_dataset(table_handle: Annotated[Any, _atypes.Resource], dataset: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + table_handle: A `Tensor` of type `resource`. + dataset: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InitializeTableFromDataset", name, table_handle, dataset) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return initialize_table_from_dataset_eager_fallback( + table_handle, dataset, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InitializeTableFromDataset", table_handle=table_handle, + dataset=dataset, name=name) + return _op +InitializeTableFromDataset = tf_export("raw_ops.InitializeTableFromDataset")(_ops.to_raw_op(initialize_table_from_dataset)) + + +def initialize_table_from_dataset_eager_fallback(table_handle: Annotated[Any, _atypes.Resource], dataset: Annotated[Any, _atypes.Variant], name, ctx): + table_handle = _ops.convert_to_tensor(table_handle, _dtypes.resource) + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + _inputs_flat = [table_handle, dataset] + _attrs = None + _result = _execute.execute(b"InitializeTableFromDataset", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def iterator_get_device(resource: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]: + r"""Returns the name of the device on which `resource` has been placed. + + Args: + resource: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IteratorGetDevice", name, resource) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return iterator_get_device_eager_fallback( + resource, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IteratorGetDevice", resource=resource, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "IteratorGetDevice", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IteratorGetDevice = tf_export("raw_ops.IteratorGetDevice")(_ops.to_raw_op(iterator_get_device)) + + +def iterator_get_device_eager_fallback(resource: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]: + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + _inputs_flat = [resource] + _attrs = None + _result = _execute.execute(b"IteratorGetDevice", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IteratorGetDevice", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def lmdb_dataset(filenames: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits the key-value pairs in one or more LMDB files. + + The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary + key-value database. This dataset can read the contents of LMDB database files, + the names of which generally have the `.mdb` suffix. + + Each output element consists of a key-value pair represented as a pair of + scalar string `Tensor`s, where the first `Tensor` contains the key and the + second `Tensor` contains the value. + + LMDB uses different file formats on big- and little-endian machines. + `LMDBDataset` can only read files in the format of the host machine. + + Args: + filenames: A `Tensor` of type `string`. + A scalar or a vector containing the name(s) of the binary file(s) to be + read. + output_types: A list of `tf.DTypes` that has length `>= 1`. 
+ output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LMDBDataset", name, filenames, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return lmdb_dataset_eager_fallback( + filenames, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'lmdb_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'lmdb_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LMDBDataset", filenames=filenames, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LMDBDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LMDBDataset = tf_export("raw_ops.LMDBDataset")(_ops.to_raw_op(lmdb_dataset)) + + +def lmdb_dataset_eager_fallback(filenames: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'lmdb_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'lmdb_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + filenames = _ops.convert_to_tensor(filenames, _dtypes.string) + _inputs_flat = [filenames] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"LMDBDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LMDBDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def latency_stats_dataset(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Records the latency of producing `input_dataset` elements in a StatsAggregator. + + Args: + input_dataset: A `Tensor` of type `variant`. + tag: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LatencyStatsDataset", name, input_dataset, tag, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return latency_stats_dataset_eager_fallback( + input_dataset, tag, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'latency_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'latency_stats_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LatencyStatsDataset", input_dataset=input_dataset, tag=tag, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LatencyStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LatencyStatsDataset = tf_export("raw_ops.LatencyStatsDataset")(_ops.to_raw_op(latency_stats_dataset)) + + +def latency_stats_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], tag: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'latency_stats_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'latency_stats_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + _inputs_flat = [input_dataset, tag] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"LatencyStatsDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LatencyStatsDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def legacy_parallel_interleave_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, deterministic:str="default", metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + The resulting dataset is similar to the `InterleaveDataset`, with the exception + that if retrieving the next value from a dataset would cause the requester to + block, it will skip that input dataset. This dataset is especially useful + when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it + allows the training step to proceed so long as some data is available. + + !! WARNING !! This dataset is not deterministic! + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + cycle_length: A `Tensor` of type `int64`. + block_length: A `Tensor` of type `int64`. + buffer_output_elements: A `Tensor` of type `int64`. + prefetch_input_elements: A `Tensor` of type `int64`. + f: A function decorated with @Defun. 
+ A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + deterministic: An optional `string`. Defaults to `"default"`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LegacyParallelInterleaveDatasetV2", name, input_dataset, + other_arguments, cycle_length, block_length, buffer_output_elements, + prefetch_input_elements, "f", f, "deterministic", deterministic, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return legacy_parallel_interleave_dataset_v2_eager_fallback( + input_dataset, other_arguments, cycle_length, block_length, + buffer_output_elements, prefetch_input_elements, f=f, + deterministic=deterministic, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'legacy_parallel_interleave_dataset_v2' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'legacy_parallel_interleave_dataset_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LegacyParallelInterleaveDatasetV2", input_dataset=input_dataset, + other_arguments=other_arguments, + cycle_length=cycle_length, + block_length=block_length, + buffer_output_elements=buffer_output_elements, + prefetch_input_elements=prefetch_input_elements, + f=f, output_types=output_types, + output_shapes=output_shapes, + deterministic=deterministic, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "deterministic", + _op.get_attr("deterministic"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LegacyParallelInterleaveDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LegacyParallelInterleaveDatasetV2 = tf_export("raw_ops.LegacyParallelInterleaveDatasetV2")(_ops.to_raw_op(legacy_parallel_interleave_dataset_v2)) + + +def legacy_parallel_interleave_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, 
_atypes.Int64], f, output_types, output_shapes, deterministic: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'legacy_parallel_interleave_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'legacy_parallel_interleave_dataset_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64) + block_length = _ops.convert_to_tensor(block_length, _dtypes.int64) + buffer_output_elements = _ops.convert_to_tensor(buffer_output_elements, _dtypes.int64) + prefetch_input_elements = _ops.convert_to_tensor(prefetch_input_elements, _dtypes.int64) + _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, buffer_output_elements, prefetch_input_elements] + _attrs = ("f", f, "deterministic", deterministic, "Targuments", + _attr_Targuments, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + _result = _execute.execute(b"LegacyParallelInterleaveDatasetV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LegacyParallelInterleaveDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result 
+ + +def list_dataset(tensors, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that emits each of `tensors` once. + + Args: + tensors: A list of `Tensor` objects. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ListDataset", name, tensors, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return list_dataset_eager_fallback( + tensors, output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'list_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'list_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ListDataset", tensors=tensors, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tinput_types", _op.get_attr("Tinput_types"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ListDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ListDataset = tf_export("raw_ops.ListDataset")(_ops.to_raw_op(list_dataset)) + + +def list_dataset_eager_fallback(tensors, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'list_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'list_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Tinput_types, tensors = _execute.convert_to_mixed_eager_tensors(tensors, ctx) + _inputs_flat = list(tensors) + _attrs = ("Tinput_types", _attr_Tinput_types, "output_types", output_types, + "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"ListDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ListDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def list_snapshot_chunks_dataset(snapshot_path: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + snapshot_path: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ListSnapshotChunksDataset", name, snapshot_path, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return list_snapshot_chunks_dataset_eager_fallback( + snapshot_path, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'list_snapshot_chunks_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'list_snapshot_chunks_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ListSnapshotChunksDataset", snapshot_path=snapshot_path, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ListSnapshotChunksDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ListSnapshotChunksDataset = tf_export("raw_ops.ListSnapshotChunksDataset")(_ops.to_raw_op(list_snapshot_chunks_dataset)) + + +def list_snapshot_chunks_dataset_eager_fallback(snapshot_path: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'list_snapshot_chunks_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'list_snapshot_chunks_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + snapshot_path = _ops.convert_to_tensor(snapshot_path, _dtypes.string) + _inputs_flat = [snapshot_path] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ListSnapshotChunksDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ListSnapshotChunksDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def load_dataset(path: Annotated[Any, _atypes.String], reader_func_other_args, output_types, output_shapes, reader_func, compression:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + path: A `Tensor` of type `string`. + reader_func_other_args: A list of `Tensor` objects. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reader_func: A function decorated with @Defun. + compression: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadDataset", name, path, reader_func_other_args, + "output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "reader_func", reader_func) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_dataset_eager_fallback( + path, reader_func_other_args, output_types=output_types, + output_shapes=output_shapes, compression=compression, + reader_func=reader_func, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'load_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'load_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadDataset", path=path, + reader_func_other_args=reader_func_other_args, + output_types=output_types, output_shapes=output_shapes, + reader_func=reader_func, compression=compression, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "compression", + _op.get_attr("compression"), "reader_func", + _op.get_attr("reader_func"), "Treader_func_args", + _op.get_attr("Treader_func_args")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "LoadDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +LoadDataset = tf_export("raw_ops.LoadDataset")(_ops.to_raw_op(load_dataset)) + + +def load_dataset_eager_fallback(path: Annotated[Any, _atypes.String], reader_func_other_args, output_types, output_shapes, reader_func, compression: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'load_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'load_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + _attr_Treader_func_args, reader_func_other_args = _execute.convert_to_mixed_eager_tensors(reader_func_other_args, ctx) + path = _ops.convert_to_tensor(path, _dtypes.string) + _inputs_flat = [path] + list(reader_func_other_args) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "reader_func", reader_func, "Treader_func_args", + _attr_Treader_func_args) + _result = _execute.execute(b"LoadDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "LoadDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def map_and_batch_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], f, output_types, output_shapes, preserve_cardinality:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that fuses mapping with batching. + + Creates a dataset that applies `f` to the outputs of `input_dataset` and then + batches `batch_size` of them. + + Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + to `batch_size * num_parallel_batches` copies of `f` in parallel. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when building a closure + for `f`. + batch_size: A `Tensor` of type `int64`. + A scalar representing the number of elements to accumulate in a + batch. 
It determines the number of concurrent invocations of `f` that process + elements from `input_dataset` in parallel. + num_parallel_calls: A `Tensor` of type `int64`. + A scalar representing the maximum number of parallel invocations of the `map_fn` + function. Applying the `map_fn` on consecutive input elements in parallel has + the potential to improve input pipeline throughput. + drop_remainder: A `Tensor` of type `bool`. + A scalar representing whether the last batch should be dropped in case its size + is smaller than desired. + f: A function decorated with @Defun. + A function to apply to the outputs of `input_dataset`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MapAndBatchDataset", name, input_dataset, other_arguments, + batch_size, num_parallel_calls, drop_remainder, "f", f, + "output_types", output_types, "output_shapes", output_shapes, + "preserve_cardinality", preserve_cardinality, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return map_and_batch_dataset_eager_fallback( + input_dataset, other_arguments, batch_size, num_parallel_calls, + drop_remainder, f=f, output_types=output_types, + output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_and_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_and_batch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MapAndBatchDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + batch_size=batch_size, + num_parallel_calls=num_parallel_calls, + drop_remainder=drop_remainder, f=f, + output_types=output_types, + output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MapAndBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MapAndBatchDataset = tf_export("raw_ops.MapAndBatchDataset")(_ops.to_raw_op(map_and_batch_dataset)) + + +def map_and_batch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, batch_size: Annotated[Any, _atypes.Int64], num_parallel_calls: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, 
_atypes.Bool], f, output_types, output_shapes, preserve_cardinality: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'map_and_batch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'map_and_batch_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset] + list(other_arguments) + [batch_size, num_parallel_calls, drop_remainder] + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "preserve_cardinality", + preserve_cardinality, "metadata", metadata) + _result = _execute.execute(b"MapAndBatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MapAndBatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def matching_files_dataset(patterns: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add 
doc. + + Args: + patterns: A `Tensor` of type `string`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MatchingFilesDataset", name, patterns) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return matching_files_dataset_eager_fallback( + patterns, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MatchingFilesDataset", patterns=patterns, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "MatchingFilesDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MatchingFilesDataset = tf_export("raw_ops.MatchingFilesDataset")(_ops.to_raw_op(matching_files_dataset)) + + +def matching_files_dataset_eager_fallback(patterns: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Variant]: + patterns = _ops.convert_to_tensor(patterns, _dtypes.string) + _inputs_flat = [patterns] + _attrs = None + _result = _execute.execute(b"MatchingFilesDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MatchingFilesDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def max_intra_op_parallelism_dataset(input_dataset: Annotated[Any, _atypes.Variant], max_intra_op_parallelism: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that overrides the maximum intra-op parallelism. 
+ + Args: + input_dataset: A `Tensor` of type `variant`. + max_intra_op_parallelism: A `Tensor` of type `int64`. + Identifies the maximum intra-op parallelism to use. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "MaxIntraOpParallelismDataset", name, input_dataset, + max_intra_op_parallelism, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return max_intra_op_parallelism_dataset_eager_fallback( + input_dataset, max_intra_op_parallelism, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'max_intra_op_parallelism_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'max_intra_op_parallelism_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "MaxIntraOpParallelismDataset", input_dataset=input_dataset, + max_intra_op_parallelism=max_intra_op_parallelism, + output_types=output_types, + output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "MaxIntraOpParallelismDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +MaxIntraOpParallelismDataset = tf_export("raw_ops.MaxIntraOpParallelismDataset")(_ops.to_raw_op(max_intra_op_parallelism_dataset)) + + +def max_intra_op_parallelism_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], max_intra_op_parallelism: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'max_intra_op_parallelism_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'max_intra_op_parallelism_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + max_intra_op_parallelism = _ops.convert_to_tensor(max_intra_op_parallelism, _dtypes.int64) + _inputs_flat = [input_dataset, max_intra_op_parallelism] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"MaxIntraOpParallelismDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "MaxIntraOpParallelismDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def non_serializable_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "NonSerializableDataset", name, input_dataset, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return non_serializable_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'non_serializable_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'non_serializable_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "NonSerializableDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "NonSerializableDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +NonSerializableDataset = tf_export("raw_ops.NonSerializableDataset")(_ops.to_raw_op(non_serializable_dataset)) + + +def non_serializable_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'non_serializable_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'non_serializable_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"NonSerializableDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "NonSerializableDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parallel_interleave_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], sloppy: Annotated[Any, _atypes.Bool], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that applies `f` to the outputs of `input_dataset`. + + The resulting dataset is similar to the `InterleaveDataset`, with the exception + that if retrieving the next value from a dataset would cause the requester to + block, it will skip that input dataset. This dataset is especially useful + when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it + allows the training step to proceed so long as some data is available. + + !! WARNING !! If the `sloppy` parameter is set to `True`, the operation of this + dataset will not be deterministic! + + This dataset has been superseded by `ParallelInterleaveDatasetV2`. New code + should use `ParallelInterleaveDatasetV2`. + + The Python API `tf.data.experimental.parallel_interleave` creates instances of + this op. `tf.data.experimental.parallel_interleave` is a deprecated API. + + Args: + input_dataset: A `Tensor` of type `variant`. + Dataset that produces a stream of arguments for the function `f`. 
+ other_arguments: A list of `Tensor` objects. + Additional arguments to pass to `f` beyond those produced by `input_dataset`. + Evaluated once when the dataset is instantiated. + cycle_length: A `Tensor` of type `int64`. + Number of datasets (each created by applying `f` to the elements of + `input_dataset`) among which the `ParallelInterleaveDataset` will cycle in a + round-robin fashion. + block_length: A `Tensor` of type `int64`. + Number of elements at a time to produce from each interleaved invocation of a + dataset returned by `f`. + sloppy: A `Tensor` of type `bool`. + If `True`, return elements as they become available, even if that means returning + these elements in a non-deterministic order. Sloppy operation may result in better + performance in the presence of stragglers, but the dataset will still block if + all of its open streams are blocked. + If `False`, always return elements in a deterministic order. + buffer_output_elements: A `Tensor` of type `int64`. + The number of elements each iterator being interleaved should buffer (similar + to the `.prefetch()` transformation for each interleaved iterator). + prefetch_input_elements: A `Tensor` of type `int64`. + Determines the number of iterators to prefetch, allowing buffers to warm up and + data to be pre-fetched without blocking the main thread. + f: A function decorated with @Defun. + A function mapping elements of `input_dataset`, concatenated with + `other_arguments`, to a Dataset variant that contains elements matching + `output_types` and `output_shapes`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParallelInterleaveDataset", name, input_dataset, + other_arguments, cycle_length, block_length, sloppy, + buffer_output_elements, prefetch_input_elements, "f", f, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parallel_interleave_dataset_eager_fallback( + input_dataset, other_arguments, cycle_length, block_length, sloppy, + buffer_output_elements, prefetch_input_elements, f=f, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_interleave_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_interleave_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParallelInterleaveDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + cycle_length=cycle_length, + block_length=block_length, sloppy=sloppy, + buffer_output_elements=buffer_output_elements, + prefetch_input_elements=prefetch_input_elements, + f=f, output_types=output_types, + output_shapes=output_shapes, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParallelInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParallelInterleaveDataset = tf_export("raw_ops.ParallelInterleaveDataset")(_ops.to_raw_op(parallel_interleave_dataset)) + + +def parallel_interleave_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, cycle_length: Annotated[Any, _atypes.Int64], block_length: Annotated[Any, _atypes.Int64], sloppy: Annotated[Any, _atypes.Bool], buffer_output_elements: Annotated[Any, _atypes.Int64], prefetch_input_elements: Annotated[Any, _atypes.Int64], f, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parallel_interleave_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parallel_interleave_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + cycle_length = _ops.convert_to_tensor(cycle_length, _dtypes.int64) + block_length = _ops.convert_to_tensor(block_length, _dtypes.int64) + sloppy = _ops.convert_to_tensor(sloppy, _dtypes.bool) + buffer_output_elements = _ops.convert_to_tensor(buffer_output_elements, _dtypes.int64) + prefetch_input_elements = _ops.convert_to_tensor(prefetch_input_elements, _dtypes.int64) + _inputs_flat = [input_dataset] + list(other_arguments) + [cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements] + _attrs = ("f", f, "Targuments", _attr_Targuments, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + _result = _execute.execute(b"ParallelInterleaveDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParallelInterleaveDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parse_example_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, dense_keys, sparse_types, dense_shapes, output_types, output_shapes, sloppy:bool=False, ragged_keys=[], ragged_value_types=[], ragged_split_types=[], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Transforms `input_dataset` containing `Example` protos as 
vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_parallel_calls: A `Tensor` of type `int64`. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A dict mapping string keys to `Tensor`s. + The keys of the dict must match the dense_keys of the feature. + sparse_keys: A list of `strings`. + A list of string keys in the examples features. + The results for these keys will be returned as `SparseTensor` objects. + dense_keys: A list of `strings`. + A list of Ndense string Tensors (scalars). + The keys expected in the Examples features associated with dense values. + sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `DTypes` of the same length as `sparse_keys`. + Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + and `tf.string` (`BytesList`) are supported. + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + List of tuples with the same length as `dense_keys`. + The shape of the data for each dense feature referenced by `dense_keys`. + Required for any input tensors identified by `dense_keys`. Must be + either fully defined, or may contain an unknown first dimension. + An unknown first dimension means the feature is treated as having + a variable number of blocks, and the output shape along this dimension + is considered unknown at graph build time. Padding is applied for + minibatch elements smaller than the maximum number of blocks for the + given feature along this dimension. + output_types: A list of `tf.DTypes` that has length `>= 1`. + The type list for the return values. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + The list of shapes being produced. + sloppy: An optional `bool`. Defaults to `False`. + ragged_keys: An optional list of `strings`. Defaults to `[]`. 
+ ragged_value_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + ragged_split_types: An optional list of `tf.DTypes` from: `tf.int32, tf.int64`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseExampleDataset", name, input_dataset, num_parallel_calls, + dense_defaults, "sparse_keys", sparse_keys, "dense_keys", dense_keys, + "sparse_types", sparse_types, "dense_shapes", dense_shapes, + "output_types", output_types, "output_shapes", output_shapes, + "sloppy", sloppy, "ragged_keys", ragged_keys, "ragged_value_types", + ragged_value_types, "ragged_split_types", ragged_split_types) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_example_dataset_eager_fallback( + input_dataset, num_parallel_calls, dense_defaults, + sparse_keys=sparse_keys, dense_keys=dense_keys, + sparse_types=sparse_types, dense_shapes=dense_shapes, + output_types=output_types, output_shapes=output_shapes, + sloppy=sloppy, ragged_keys=ragged_keys, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_example_dataset' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example_dataset' Op, not %r." 
% dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_dataset' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_dataset' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parse_example_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parse_example_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if sloppy is None: + sloppy = False + sloppy = _execute.make_bool(sloppy, "sloppy") + if ragged_keys is None: + ragged_keys = [] + if not isinstance(ragged_keys, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_keys' argument to " + "'parse_example_dataset' Op, not %r." % ragged_keys) + ragged_keys = [_execute.make_str(_s, "ragged_keys") for _s in ragged_keys] + if ragged_value_types is None: + ragged_value_types = [] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_dataset' Op, not %r." 
% ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if ragged_split_types is None: + ragged_split_types = [] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_dataset' Op, not %r." % ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseExampleDataset", input_dataset=input_dataset, + num_parallel_calls=num_parallel_calls, + dense_defaults=dense_defaults, + sparse_keys=sparse_keys, dense_keys=dense_keys, + sparse_types=sparse_types, + dense_shapes=dense_shapes, + output_types=output_types, + output_shapes=output_shapes, sloppy=sloppy, + ragged_keys=ragged_keys, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("sparse_keys", _op.get_attr("sparse_keys"), "dense_keys", + _op.get_attr("dense_keys"), "sparse_types", + _op.get_attr("sparse_types"), "Tdense", _op.get_attr("Tdense"), + "dense_shapes", _op.get_attr("dense_shapes"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "sloppy", + _op._get_attr_bool("sloppy"), "ragged_keys", + _op.get_attr("ragged_keys"), "ragged_value_types", + _op.get_attr("ragged_value_types"), "ragged_split_types", + _op.get_attr("ragged_split_types")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseExampleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParseExampleDataset = tf_export("raw_ops.ParseExampleDataset")(_ops.to_raw_op(parse_example_dataset)) + + +def parse_example_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, 
dense_keys, sparse_types, dense_shapes, output_types, output_shapes, sloppy: bool, ragged_keys, ragged_value_types, ragged_split_types, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_example_dataset' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example_dataset' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_dataset' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_dataset' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parse_example_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parse_example_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if sloppy is None: + sloppy = False + sloppy = _execute.make_bool(sloppy, "sloppy") + if ragged_keys is None: + ragged_keys = [] + if not isinstance(ragged_keys, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_keys' argument to " + "'parse_example_dataset' Op, not %r." 
% ragged_keys) + ragged_keys = [_execute.make_str(_s, "ragged_keys") for _s in ragged_keys] + if ragged_value_types is None: + ragged_value_types = [] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_dataset' Op, not %r." % ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if ragged_split_types is None: + ragged_split_types = [] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_dataset' Op, not %r." % ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + _inputs_flat = [input_dataset, num_parallel_calls] + list(dense_defaults) + _attrs = ("sparse_keys", sparse_keys, "dense_keys", dense_keys, + "sparse_types", sparse_types, "Tdense", _attr_Tdense, "dense_shapes", + dense_shapes, "output_types", output_types, "output_shapes", output_shapes, + "sloppy", sloppy, "ragged_keys", ragged_keys, "ragged_value_types", + ragged_value_types, "ragged_split_types", ragged_split_types) + _result = _execute.execute(b"ParseExampleDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseExampleDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def parse_example_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, dense_keys, sparse_types, dense_shapes, output_types, output_shapes, 
deterministic:str="default", ragged_keys=[], ragged_value_types=[], ragged_split_types=[], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_parallel_calls: A `Tensor` of type `int64`. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A dict mapping string keys to `Tensor`s. + The keys of the dict must match the dense_keys of the feature. + sparse_keys: A list of `strings`. + A list of string keys in the examples features. + The results for these keys will be returned as `SparseTensor` objects. + dense_keys: A list of `strings`. + A list of Ndense string Tensors (scalars). + The keys expected in the Examples features associated with dense values. + sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `DTypes` of the same length as `sparse_keys`. + Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + and `tf.string` (`BytesList`) are supported. + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + List of tuples with the same length as `dense_keys`. + The shape of the data for each dense feature referenced by `dense_keys`. + Required for any input tensors identified by `dense_keys`. Must be + either fully defined, or may contain an unknown first dimension. + An unknown first dimension means the feature is treated as having + a variable number of blocks, and the output shape along this dimension + is considered unknown at graph build time. Padding is applied for + minibatch elements smaller than the maximum number of blocks for the + given feature along this dimension. + output_types: A list of `tf.DTypes` that has length `>= 1`. + The type list for the return values. 
+ output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + The list of shapes being produced. + deterministic: An optional `string`. Defaults to `"default"`. + A string indicating the op-level determinism to use. Deterministic controls + whether the dataset is allowed to return elements out of order if the next + element to be returned isn't available, but a later element is. Options are + "true", "false", and "default". "default" indicates that determinism should be + decided by the `experimental_deterministic` parameter of `tf.data.Options`. + ragged_keys: An optional list of `strings`. Defaults to `[]`. + ragged_value_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + ragged_split_types: An optional list of `tf.DTypes` from: `tf.int32, tf.int64`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseExampleDatasetV2", name, input_dataset, + num_parallel_calls, dense_defaults, "sparse_keys", sparse_keys, + "dense_keys", dense_keys, "sparse_types", sparse_types, + "dense_shapes", dense_shapes, "output_types", output_types, + "output_shapes", output_shapes, "deterministic", deterministic, + "ragged_keys", ragged_keys, "ragged_value_types", ragged_value_types, + "ragged_split_types", ragged_split_types) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_example_dataset_v2_eager_fallback( + input_dataset, num_parallel_calls, dense_defaults, + sparse_keys=sparse_keys, dense_keys=dense_keys, + sparse_types=sparse_types, dense_shapes=dense_shapes, + output_types=output_types, output_shapes=output_shapes, + deterministic=deterministic, 
ragged_keys=ragged_keys, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_example_dataset_v2' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example_dataset_v2' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_dataset_v2' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parse_example_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if ragged_keys is None: + ragged_keys = [] + if not isinstance(ragged_keys, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_keys' argument to " + "'parse_example_dataset_v2' Op, not %r." % ragged_keys) + ragged_keys = [_execute.make_str(_s, "ragged_keys") for _s in ragged_keys] + if ragged_value_types is None: + ragged_value_types = [] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if ragged_split_types is None: + ragged_split_types = [] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_dataset_v2' Op, not %r." 
% ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseExampleDatasetV2", input_dataset=input_dataset, + num_parallel_calls=num_parallel_calls, + dense_defaults=dense_defaults, + sparse_keys=sparse_keys, + dense_keys=dense_keys, + sparse_types=sparse_types, + dense_shapes=dense_shapes, + output_types=output_types, + output_shapes=output_shapes, + deterministic=deterministic, + ragged_keys=ragged_keys, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("sparse_keys", _op.get_attr("sparse_keys"), "dense_keys", + _op.get_attr("dense_keys"), "sparse_types", + _op.get_attr("sparse_types"), "Tdense", _op.get_attr("Tdense"), + "dense_shapes", _op.get_attr("dense_shapes"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "deterministic", + _op.get_attr("deterministic"), "ragged_keys", + _op.get_attr("ragged_keys"), "ragged_value_types", + _op.get_attr("ragged_value_types"), "ragged_split_types", + _op.get_attr("ragged_split_types")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseExampleDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParseExampleDatasetV2 = tf_export("raw_ops.ParseExampleDatasetV2")(_ops.to_raw_op(parse_example_dataset_v2)) + + +def parse_example_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_parallel_calls: Annotated[Any, _atypes.Int64], dense_defaults, sparse_keys, dense_keys, sparse_types, dense_shapes, output_types, output_shapes, deterministic: str, ragged_keys, ragged_value_types, ragged_split_types, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + 
"'parse_example_dataset_v2' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example_dataset_v2' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_dataset_v2' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'parse_example_dataset_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if deterministic is None: + deterministic = "default" + deterministic = _execute.make_str(deterministic, "deterministic") + if ragged_keys is None: + ragged_keys = [] + if not isinstance(ragged_keys, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_keys' argument to " + "'parse_example_dataset_v2' Op, not %r." 
% ragged_keys) + ragged_keys = [_execute.make_str(_s, "ragged_keys") for _s in ragged_keys] + if ragged_value_types is None: + ragged_value_types = [] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if ragged_split_types is None: + ragged_split_types = [] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_dataset_v2' Op, not %r." % ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_parallel_calls = _ops.convert_to_tensor(num_parallel_calls, _dtypes.int64) + _inputs_flat = [input_dataset, num_parallel_calls] + list(dense_defaults) + _attrs = ("sparse_keys", sparse_keys, "dense_keys", dense_keys, + "sparse_types", sparse_types, "Tdense", _attr_Tdense, "dense_shapes", + dense_shapes, "output_types", output_types, "output_shapes", output_shapes, + "deterministic", deterministic, "ragged_keys", ragged_keys, + "ragged_value_types", ragged_value_types, "ragged_split_types", + ragged_split_types) + _result = _execute.execute(b"ParseExampleDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseExampleDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def private_thread_pool_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_threads: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset 
that uses a custom thread pool to compute `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + num_threads: A `Tensor` of type `int64`. + Identifies the number of threads to use for the private threadpool. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "PrivateThreadPoolDataset", name, input_dataset, num_threads, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return private_thread_pool_dataset_eager_fallback( + input_dataset, num_threads, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'private_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'private_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "PrivateThreadPoolDataset", input_dataset=input_dataset, + num_threads=num_threads, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "PrivateThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +PrivateThreadPoolDataset = tf_export("raw_ops.PrivateThreadPoolDataset")(_ops.to_raw_op(private_thread_pool_dataset)) + + +def private_thread_pool_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_threads: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'private_thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'private_thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_threads = _ops.convert_to_tensor(num_threads, _dtypes.int64) + _inputs_flat = [input_dataset, num_threads] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"PrivateThreadPoolDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "PrivateThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def random_dataset(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a Dataset that returns pseudorandom numbers. + + Creates a Dataset that returns a stream of uniformly distributed + pseudorandom 64-bit signed integers. + + In the TensorFlow Python API, you can instantiate this dataset via the + class `tf.data.experimental.RandomDataset`. + + Instances of this dataset are also created as a result of the + `hoist_random_uniform` static optimization. Whether this optimization is + performed is determined by the `experimental_optimization.hoist_random_uniform` + option of `tf.data.Options`. + + Args: + seed: A `Tensor` of type `int64`. + A scalar seed for the random number generator. If either seed or + seed2 is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used. + seed2: A `Tensor` of type `int64`. + A second scalar seed to avoid seed collision. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). 
+ + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomDataset", name, seed, seed2, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_dataset_eager_fallback( + seed, seed2, output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'random_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'random_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomDataset", seed=seed, seed2=seed2, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomDataset = tf_export("raw_ops.RandomDataset")(_ops.to_raw_op(random_dataset)) + + +def random_dataset_eager_fallback(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'random_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'random_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + _inputs_flat = [seed, seed2] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"RandomDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def random_dataset_v2(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, rerandomize_each_iteration:bool=False, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a Dataset that returns pseudorandom numbers. + + Creates a Dataset that returns a stream of uniformly distributed + pseudorandom 64-bit signed integers. It accepts a boolean attribute that + determines if the random number generators are re-applied at each epoch. The + default value is True which means that the seeds are applied and the same + sequence of random numbers are generated at each epoch. If set to False, the + seeds are not re-applied and a different sequence of random numbers are + generated at each epoch. + + In the TensorFlow Python API, you can instantiate this dataset via the + class `tf.data.experimental.RandomDatasetV2`. + + Args: + seed: A `Tensor` of type `int64`. + A scalar seed for the random number generator. If either seed or + seed2 is set to be non-zero, the random number generator is seeded + by the given seed. Otherwise, a random seed is used. + seed2: A `Tensor` of type `int64`. + A second scalar seed to avoid seed collision. 
+ seed_generator: A `Tensor` of type `resource`. + A resource for the random number seed generator. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + rerandomize_each_iteration: An optional `bool`. Defaults to `False`. + A boolean attribute to rerandomize the sequence of random numbers generated + at each epoch. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomDatasetV2", name, seed, seed2, seed_generator, + "rerandomize_each_iteration", rerandomize_each_iteration, + "output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_dataset_v2_eager_fallback( + seed, seed2, seed_generator, + rerandomize_each_iteration=rerandomize_each_iteration, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'random_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'random_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if rerandomize_each_iteration is None: + rerandomize_each_iteration = False + rerandomize_each_iteration = _execute.make_bool(rerandomize_each_iteration, "rerandomize_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomDatasetV2", seed=seed, seed2=seed2, + seed_generator=seed_generator, + output_types=output_types, + output_shapes=output_shapes, + rerandomize_each_iteration=rerandomize_each_iteration, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("rerandomize_each_iteration", + _op._get_attr_bool("rerandomize_each_iteration"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomDatasetV2 = tf_export("raw_ops.RandomDatasetV2")(_ops.to_raw_op(random_dataset_v2)) + + +def random_dataset_v2_eager_fallback(seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], seed_generator: Annotated[Any, _atypes.Resource], output_types, output_shapes, rerandomize_each_iteration: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'random_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'random_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if rerandomize_each_iteration is None: + rerandomize_each_iteration = False + rerandomize_each_iteration = _execute.make_bool(rerandomize_each_iteration, "rerandomize_each_iteration") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + seed_generator = _ops.convert_to_tensor(seed_generator, _dtypes.resource) + _inputs_flat = [seed, seed2, seed_generator] + _attrs = ("rerandomize_each_iteration", rerandomize_each_iteration, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"RandomDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def rebatch_dataset(input_dataset: Annotated[Any, _atypes.Variant], num_replicas: Annotated[Any, _atypes.Int64], output_types, output_shapes, use_fallback:bool=True, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that changes the batch size. + + Creates a dataset that changes the batch size of the dataset to current batch + size // num_workers. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + num_replicas: A `Tensor` of type `int64`. + A scalar representing the number of replicas to distribute this batch across. As + a result of this transformation the current batch size would end up being + divided by this parameter. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + use_fallback: An optional `bool`. Defaults to `True`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RebatchDataset", name, input_dataset, num_replicas, + "output_types", output_types, "output_shapes", output_shapes, + "use_fallback", use_fallback) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rebatch_dataset_eager_fallback( + input_dataset, num_replicas, output_types=output_types, + output_shapes=output_shapes, use_fallback=use_fallback, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rebatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rebatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_fallback is None: + use_fallback = True + use_fallback = _execute.make_bool(use_fallback, "use_fallback") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RebatchDataset", input_dataset=input_dataset, + num_replicas=num_replicas, + output_types=output_types, + output_shapes=output_shapes, + use_fallback=use_fallback, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "use_fallback", + _op._get_attr_bool("use_fallback")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RebatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RebatchDataset = tf_export("raw_ops.RebatchDataset")(_ops.to_raw_op(rebatch_dataset)) + + +def rebatch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], num_replicas: Annotated[Any, _atypes.Int64], output_types, output_shapes, use_fallback: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rebatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rebatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if use_fallback is None: + use_fallback = True + use_fallback = _execute.make_bool(use_fallback, "use_fallback") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + num_replicas = _ops.convert_to_tensor(num_replicas, _dtypes.int64) + _inputs_flat = [input_dataset, num_replicas] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "use_fallback", use_fallback) + _result = _execute.execute(b"RebatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RebatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def rebatch_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], batch_sizes: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that changes the batch size. + + Creates a dataset that rebatches elements from `input_dataset` into new batch + sizes. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + batch_sizes: A `Tensor` of type `int64`. + A vector of integers representing the size of batches to produce. These values + are cycled through in order. + drop_remainder: A `Tensor` of type `bool`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RebatchDatasetV2", name, input_dataset, batch_sizes, + drop_remainder, "output_types", output_types, "output_shapes", + output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rebatch_dataset_v2_eager_fallback( + input_dataset, batch_sizes, drop_remainder, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rebatch_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rebatch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RebatchDatasetV2", input_dataset=input_dataset, + batch_sizes=batch_sizes, + drop_remainder=drop_remainder, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RebatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RebatchDatasetV2 = tf_export("raw_ops.RebatchDatasetV2")(_ops.to_raw_op(rebatch_dataset_v2)) + + +def rebatch_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], batch_sizes: Annotated[Any, _atypes.Int64], drop_remainder: Annotated[Any, _atypes.Bool], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'rebatch_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'rebatch_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + batch_sizes = _ops.convert_to_tensor(batch_sizes, _dtypes.int64) + drop_remainder = _ops.convert_to_tensor(drop_remainder, _dtypes.bool) + _inputs_flat = [input_dataset, batch_sizes, drop_remainder] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"RebatchDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RebatchDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def register_dataset(dataset: Annotated[Any, _atypes.Variant], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], external_state_policy: int, element_spec:str="", metadata:str="", name=None) -> Annotated[Any, _atypes.Int64]: + r"""Registers a dataset with the tf.data service. + + Args: + dataset: A `Tensor` of type `variant`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + external_state_policy: An `int`. + element_spec: An optional `string`. Defaults to `""`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RegisterDataset", name, dataset, address, protocol, + "external_state_policy", external_state_policy, "element_spec", + element_spec, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return register_dataset_eager_fallback( + dataset, address, protocol, + external_state_policy=external_state_policy, + element_spec=element_spec, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if element_spec is None: + element_spec = "" + element_spec = _execute.make_str(element_spec, "element_spec") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RegisterDataset", dataset=dataset, address=address, + protocol=protocol, + external_state_policy=external_state_policy, + element_spec=element_spec, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("external_state_policy", + _op._get_attr_int("external_state_policy"), "element_spec", + _op.get_attr("element_spec"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RegisterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RegisterDataset = tf_export("raw_ops.RegisterDataset")(_ops.to_raw_op(register_dataset)) + + +def register_dataset_eager_fallback(dataset: Annotated[Any, _atypes.Variant], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], external_state_policy: int, element_spec: 
str, metadata: str, name, ctx) -> Annotated[Any, _atypes.Int64]: + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if element_spec is None: + element_spec = "" + element_spec = _execute.make_str(element_spec, "element_spec") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + _inputs_flat = [dataset, address, protocol] + _attrs = ("external_state_policy", external_state_policy, "element_spec", + element_spec, "metadata", metadata) + _result = _execute.execute(b"RegisterDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RegisterDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def register_dataset_v2(dataset: Annotated[Any, _atypes.Variant], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], external_state_policy: int, element_spec:str="", requested_dataset_id:str="", metadata:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Registers a dataset with the tf.data service. + + Args: + dataset: A `Tensor` of type `variant`. + address: A `Tensor` of type `string`. + protocol: A `Tensor` of type `string`. + external_state_policy: An `int`. + element_spec: An optional `string`. Defaults to `""`. + requested_dataset_id: An optional `string`. Defaults to `""`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RegisterDatasetV2", name, dataset, address, protocol, + "external_state_policy", external_state_policy, "element_spec", + element_spec, "requested_dataset_id", requested_dataset_id, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return register_dataset_v2_eager_fallback( + dataset, address, protocol, + external_state_policy=external_state_policy, + element_spec=element_spec, + requested_dataset_id=requested_dataset_id, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if element_spec is None: + element_spec = "" + element_spec = _execute.make_str(element_spec, "element_spec") + if requested_dataset_id is None: + requested_dataset_id = "" + requested_dataset_id = _execute.make_str(requested_dataset_id, "requested_dataset_id") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RegisterDatasetV2", dataset=dataset, address=address, + protocol=protocol, + external_state_policy=external_state_policy, + element_spec=element_spec, + requested_dataset_id=requested_dataset_id, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("external_state_policy", + _op._get_attr_int("external_state_policy"), "element_spec", + _op.get_attr("element_spec"), "requested_dataset_id", + _op.get_attr("requested_dataset_id"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RegisterDatasetV2", 
_inputs_flat, _attrs, _result) + _result, = _result + return _result + +RegisterDatasetV2 = tf_export("raw_ops.RegisterDatasetV2")(_ops.to_raw_op(register_dataset_v2)) + + +def register_dataset_v2_eager_fallback(dataset: Annotated[Any, _atypes.Variant], address: Annotated[Any, _atypes.String], protocol: Annotated[Any, _atypes.String], external_state_policy: int, element_spec: str, requested_dataset_id: str, metadata: str, name, ctx) -> Annotated[Any, _atypes.String]: + external_state_policy = _execute.make_int(external_state_policy, "external_state_policy") + if element_spec is None: + element_spec = "" + element_spec = _execute.make_str(element_spec, "element_spec") + if requested_dataset_id is None: + requested_dataset_id = "" + requested_dataset_id = _execute.make_str(requested_dataset_id, "requested_dataset_id") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + dataset = _ops.convert_to_tensor(dataset, _dtypes.variant) + address = _ops.convert_to_tensor(address, _dtypes.string) + protocol = _ops.convert_to_tensor(protocol, _dtypes.string) + _inputs_flat = [dataset, address, protocol] + _attrs = ("external_state_policy", external_state_policy, "element_spec", + element_spec, "requested_dataset_id", requested_dataset_id, "metadata", + metadata) + _result = _execute.execute(b"RegisterDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RegisterDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sampling_dataset(input_dataset: Annotated[Any, _atypes.Variant], rate: Annotated[Any, _atypes.Float32], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that takes a Bernoulli sample of the contents of another dataset. 
+ + There is no transformation in the `tf.data` Python API for creating this dataset. + Instead, it is created as a result of the `filter_with_random_uniform_fusion` + static optimization. Whether this optimization is performed is determined by the + `experimental_optimization.filter_with_random_uniform_fusion` option of + `tf.data.Options`. + + Args: + input_dataset: A `Tensor` of type `variant`. + rate: A `Tensor` of type `float32`. + A scalar representing the sample rate. Each element of `input_dataset` is + retained with this probability, independent of all other elements. + seed: A `Tensor` of type `int64`. + A scalar representing seed of random number generator. + seed2: A `Tensor` of type `int64`. + A scalar representing seed2 of random number generator. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SamplingDataset", name, input_dataset, rate, seed, seed2, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sampling_dataset_eager_fallback( + input_dataset, rate, seed, seed2, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sampling_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sampling_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SamplingDataset", input_dataset=input_dataset, rate=rate, seed=seed, + seed2=seed2, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SamplingDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SamplingDataset = tf_export("raw_ops.SamplingDataset")(_ops.to_raw_op(sampling_dataset)) + + +def sampling_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], rate: Annotated[Any, _atypes.Float32], seed: Annotated[Any, _atypes.Int64], seed2: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sampling_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sampling_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + rate = _ops.convert_to_tensor(rate, _dtypes.float32) + seed = _ops.convert_to_tensor(seed, _dtypes.int64) + seed2 = _ops.convert_to_tensor(seed2, _dtypes.int64) + _inputs_flat = [input_dataset, rate, seed, seed2] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"SamplingDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SamplingDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def save_dataset(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], shard_func_other_args, shard_func, compression:str="", use_shard_func:bool=True, name=None): + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + path: A `Tensor` of type `string`. + shard_func_other_args: A list of `Tensor` objects. + shard_func: A function decorated with @Defun. + compression: An optional `string`. Defaults to `""`. + use_shard_func: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SaveDataset", name, input_dataset, path, shard_func_other_args, + "compression", compression, "shard_func", shard_func, + "use_shard_func", use_shard_func) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return save_dataset_eager_fallback( + input_dataset, path, shard_func_other_args, compression=compression, + shard_func=shard_func, use_shard_func=use_shard_func, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if use_shard_func is None: + use_shard_func = True + use_shard_func = _execute.make_bool(use_shard_func, "use_shard_func") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SaveDataset", input_dataset=input_dataset, path=path, + shard_func_other_args=shard_func_other_args, + shard_func=shard_func, compression=compression, + use_shard_func=use_shard_func, name=name) + return _op +SaveDataset = tf_export("raw_ops.SaveDataset")(_ops.to_raw_op(save_dataset)) + + +def save_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], shard_func_other_args, shard_func, compression: str, use_shard_func: bool, name, ctx): + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if use_shard_func is None: + use_shard_func = True + use_shard_func = _execute.make_bool(use_shard_func, "use_shard_func") + _attr_Tshard_func_args, shard_func_other_args = _execute.convert_to_mixed_eager_tensors(shard_func_other_args, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + path 
= _ops.convert_to_tensor(path, _dtypes.string) + _inputs_flat = [input_dataset, path] + list(shard_func_other_args) + _attrs = ("compression", compression, "shard_func", shard_func, + "use_shard_func", use_shard_func, "Tshard_func_args", + _attr_Tshard_func_args) + _result = _execute.execute(b"SaveDataset", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def save_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], shard_func_other_args, shard_func, output_types, output_shapes, compression:str="", use_shard_func:bool=True, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + path: A `Tensor` of type `string`. + shard_func_other_args: A list of `Tensor` objects. + shard_func: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + compression: An optional `string`. Defaults to `""`. + use_shard_func: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SaveDatasetV2", name, input_dataset, path, + shard_func_other_args, "compression", compression, "shard_func", + shard_func, "use_shard_func", use_shard_func, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return save_dataset_v2_eager_fallback( + input_dataset, path, shard_func_other_args, compression=compression, + shard_func=shard_func, use_shard_func=use_shard_func, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'save_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'save_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if use_shard_func is None: + use_shard_func = True + use_shard_func = _execute.make_bool(use_shard_func, "use_shard_func") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SaveDatasetV2", input_dataset=input_dataset, path=path, + shard_func_other_args=shard_func_other_args, + shard_func=shard_func, output_types=output_types, + output_shapes=output_shapes, compression=compression, + use_shard_func=use_shard_func, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("compression", _op.get_attr("compression"), "shard_func", + _op.get_attr("shard_func"), "use_shard_func", + _op._get_attr_bool("use_shard_func"), "Tshard_func_args", + _op.get_attr("Tshard_func_args"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SaveDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SaveDatasetV2 = tf_export("raw_ops.SaveDatasetV2")(_ops.to_raw_op(save_dataset_v2)) + + +def save_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], shard_func_other_args, shard_func, output_types, output_shapes, compression: str, use_shard_func: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'save_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'save_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if use_shard_func is None: + use_shard_func = True + use_shard_func = _execute.make_bool(use_shard_func, "use_shard_func") + _attr_Tshard_func_args, shard_func_other_args = _execute.convert_to_mixed_eager_tensors(shard_func_other_args, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + path = _ops.convert_to_tensor(path, _dtypes.string) + _inputs_flat = [input_dataset, path] + list(shard_func_other_args) + _attrs = ("compression", compression, "shard_func", shard_func, + "use_shard_func", use_shard_func, "Tshard_func_args", + _attr_Tshard_func_args, "output_types", output_types, "output_shapes", + output_shapes) + _result = _execute.execute(b"SaveDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SaveDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def scan_dataset(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, preserve_cardinality:bool=False, use_default_device:bool=True, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset successively reduces `f` over the elements of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + initial_state: A list of `Tensor` objects. + other_arguments: A list of `Tensor` objects. + f: A function decorated with @Defun. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + preserve_cardinality: An optional `bool`. Defaults to `False`. + use_default_device: An optional `bool`. Defaults to `True`. + metadata: An optional `string`. Defaults to `""`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ScanDataset", name, input_dataset, initial_state, + other_arguments, "f", f, "output_types", output_types, + "output_shapes", output_shapes, "preserve_cardinality", + preserve_cardinality, "use_default_device", use_default_device, + "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return scan_dataset_eager_fallback( + input_dataset, initial_state, other_arguments, f=f, + output_types=output_types, output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, + use_default_device=use_default_device, metadata=metadata, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'scan_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'scan_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if use_default_device is None: + use_default_device = True + use_default_device = _execute.make_bool(use_default_device, "use_default_device") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ScanDataset", input_dataset=input_dataset, + initial_state=initial_state, + other_arguments=other_arguments, f=f, + output_types=output_types, output_shapes=output_shapes, + preserve_cardinality=preserve_cardinality, + use_default_device=use_default_device, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("f", _op.get_attr("f"), "Tstate", _op.get_attr("Tstate"), + "Targuments", _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "preserve_cardinality", + _op._get_attr_bool("preserve_cardinality"), + "use_default_device", _op._get_attr_bool("use_default_device"), + "metadata", _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ScanDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ScanDataset = tf_export("raw_ops.ScanDataset")(_ops.to_raw_op(scan_dataset)) + + +def scan_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], initial_state, other_arguments, f, output_types, output_shapes, preserve_cardinality: bool, use_default_device: bool, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'scan_dataset' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'scan_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if preserve_cardinality is None: + preserve_cardinality = False + preserve_cardinality = _execute.make_bool(preserve_cardinality, "preserve_cardinality") + if use_default_device is None: + use_default_device = True + use_default_device = _execute.make_bool(use_default_device, "use_default_device") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Tstate, initial_state = _execute.convert_to_mixed_eager_tensors(initial_state, ctx) + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(initial_state) + list(other_arguments) + _attrs = ("f", f, "Tstate", _attr_Tstate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes, + "preserve_cardinality", preserve_cardinality, "use_default_device", + use_default_device, "metadata", metadata) + _result = _execute.execute(b"ScanDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ScanDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def set_stats_aggregator_dataset(input_dataset: Annotated[Any, _atypes.Variant], stats_aggregator: Annotated[Any, _atypes.Resource], tag: Annotated[Any, _atypes.String], counter_prefix: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. 
+ stats_aggregator: A `Tensor` of type `resource`. + tag: A `Tensor` of type `string`. + counter_prefix: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SetStatsAggregatorDataset", name, input_dataset, + stats_aggregator, tag, counter_prefix, "output_types", output_types, + "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return set_stats_aggregator_dataset_eager_fallback( + input_dataset, stats_aggregator, tag, counter_prefix, + output_types=output_types, output_shapes=output_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'set_stats_aggregator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'set_stats_aggregator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SetStatsAggregatorDataset", input_dataset=input_dataset, + stats_aggregator=stats_aggregator, + tag=tag, counter_prefix=counter_prefix, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SetStatsAggregatorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SetStatsAggregatorDataset = tf_export("raw_ops.SetStatsAggregatorDataset")(_ops.to_raw_op(set_stats_aggregator_dataset)) + + +def set_stats_aggregator_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], stats_aggregator: Annotated[Any, _atypes.Resource], tag: Annotated[Any, _atypes.String], counter_prefix: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'set_stats_aggregator_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'set_stats_aggregator_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + stats_aggregator = _ops.convert_to_tensor(stats_aggregator, _dtypes.resource) + tag = _ops.convert_to_tensor(tag, _dtypes.string) + counter_prefix = _ops.convert_to_tensor(counter_prefix, _dtypes.string) + _inputs_flat = [input_dataset, stats_aggregator, tag, counter_prefix] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"SetStatsAggregatorDataset", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SetStatsAggregatorDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sleep_dataset(input_dataset: Annotated[Any, _atypes.Variant], sleep_microseconds: Annotated[Any, _atypes.Int64], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + input_dataset: A `Tensor` of type `variant`. + sleep_microseconds: A `Tensor` of type `int64`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SleepDataset", name, input_dataset, sleep_microseconds, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sleep_dataset_eager_fallback( + input_dataset, sleep_microseconds, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sleep_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sleep_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SleepDataset", input_dataset=input_dataset, + sleep_microseconds=sleep_microseconds, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SleepDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SleepDataset = tf_export("raw_ops.SleepDataset")(_ops.to_raw_op(sleep_dataset)) + + +def sleep_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], sleep_microseconds: Annotated[Any, _atypes.Int64], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sleep_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sleep_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + sleep_microseconds = _ops.convert_to_tensor(sleep_microseconds, _dtypes.int64) + _inputs_flat = [input_dataset, sleep_microseconds] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"SleepDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SleepDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sliding_window_dataset(input_dataset: Annotated[Any, _atypes.Variant], window_size: Annotated[Any, _atypes.Int64], window_shift: Annotated[Any, _atypes.Int64], window_stride: Annotated[Any, _atypes.Int64], output_types, output_shapes, drop_remainder:bool=True, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that passes a sliding window over `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + window_size: A `Tensor` of type `int64`. + A scalar representing the number of elements in the + sliding window. + window_shift: A `Tensor` of type `int64`. + A scalar representing the steps moving the sliding window + forward in one iteration. It must be positive. + window_stride: A `Tensor` of type `int64`. + A scalar representing the stride of the input elements of the sliding window. + It must be positive. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + drop_remainder: An optional `bool`. Defaults to `True`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SlidingWindowDataset", name, input_dataset, window_size, + window_shift, window_stride, "drop_remainder", drop_remainder, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sliding_window_dataset_eager_fallback( + input_dataset, window_size, window_shift, window_stride, + drop_remainder=drop_remainder, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sliding_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sliding_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if drop_remainder is None: + drop_remainder = True + drop_remainder = _execute.make_bool(drop_remainder, "drop_remainder") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SlidingWindowDataset", input_dataset=input_dataset, + window_size=window_size, + window_shift=window_shift, + window_stride=window_stride, + output_types=output_types, + output_shapes=output_shapes, + drop_remainder=drop_remainder, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("drop_remainder", _op._get_attr_bool("drop_remainder"), + "output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SlidingWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SlidingWindowDataset = tf_export("raw_ops.SlidingWindowDataset")(_ops.to_raw_op(sliding_window_dataset)) + + +def sliding_window_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], window_size: Annotated[Any, _atypes.Int64], window_shift: Annotated[Any, _atypes.Int64], window_stride: Annotated[Any, _atypes.Int64], output_types, output_shapes, drop_remainder: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sliding_window_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sliding_window_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if drop_remainder is None: + drop_remainder = True + drop_remainder = _execute.make_bool(drop_remainder, "drop_remainder") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + window_size = _ops.convert_to_tensor(window_size, _dtypes.int64) + window_shift = _ops.convert_to_tensor(window_shift, _dtypes.int64) + window_stride = _ops.convert_to_tensor(window_stride, _dtypes.int64) + _inputs_flat = [input_dataset, window_size, window_shift, window_stride] + _attrs = ("drop_remainder", drop_remainder, "output_types", output_types, + "output_shapes", output_shapes) + _result = _execute.execute(b"SlidingWindowDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SlidingWindowDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def snapshot_chunk_dataset(chunk_file: Annotated[Any, _atypes.String], output_types, output_shapes, compression:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + chunk_file: A `Tensor` of type `string`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + compression: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SnapshotChunkDataset", name, chunk_file, "output_types", + output_types, "output_shapes", output_shapes, "compression", + compression) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return snapshot_chunk_dataset_eager_fallback( + chunk_file, output_types=output_types, output_shapes=output_shapes, + compression=compression, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_chunk_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_chunk_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SnapshotChunkDataset", chunk_file=chunk_file, + output_types=output_types, + output_shapes=output_shapes, + compression=compression, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "compression", + _op.get_attr("compression")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SnapshotChunkDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SnapshotChunkDataset = tf_export("raw_ops.SnapshotChunkDataset")(_ops.to_raw_op(snapshot_chunk_dataset)) + + +def snapshot_chunk_dataset_eager_fallback(chunk_file: Annotated[Any, _atypes.String], output_types, output_shapes, compression: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_chunk_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_chunk_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + chunk_file = _ops.convert_to_tensor(chunk_file, _dtypes.string) + _inputs_flat = [chunk_file] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "compression", compression) + _result = _execute.execute(b"SnapshotChunkDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SnapshotChunkDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def snapshot_dataset(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], output_types, output_shapes, compression:str="", reader_path_prefix:str="", writer_path_prefix:str="", shard_size_bytes:int=10737418240, pending_snapshot_expiry_seconds:int=86400, num_reader_threads:int=1, reader_buffer_size:int=1, num_writer_threads:int=1, writer_buffer_size:int=1, shuffle_on_read:bool=False, seed:int=0, seed2:int=0, mode:str="auto", snapshot_name:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that will write to / read from a snapshot. + + This dataset attempts to determine whether a valid snapshot exists at the + `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. + If not, it will run the preprocessing pipeline as usual, and write out a + snapshot of the data processed for future use. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + path: A `Tensor` of type `string`. + The path we should write snapshots to / read snapshots from. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + compression: An optional `string`. Defaults to `""`. 
+ reader_path_prefix: An optional `string`. Defaults to `""`. + writer_path_prefix: An optional `string`. Defaults to `""`. + shard_size_bytes: An optional `int`. Defaults to `10737418240`. + pending_snapshot_expiry_seconds: An optional `int`. Defaults to `86400`. + num_reader_threads: An optional `int`. Defaults to `1`. + reader_buffer_size: An optional `int`. Defaults to `1`. + num_writer_threads: An optional `int`. Defaults to `1`. + writer_buffer_size: An optional `int`. Defaults to `1`. + shuffle_on_read: An optional `bool`. Defaults to `False`. + seed: An optional `int`. Defaults to `0`. + seed2: An optional `int`. Defaults to `0`. + mode: An optional `string`. Defaults to `"auto"`. + snapshot_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SnapshotDataset", name, input_dataset, path, "output_types", + output_types, "output_shapes", output_shapes, "compression", + compression, "reader_path_prefix", reader_path_prefix, + "writer_path_prefix", writer_path_prefix, "shard_size_bytes", + shard_size_bytes, "pending_snapshot_expiry_seconds", + pending_snapshot_expiry_seconds, "num_reader_threads", + num_reader_threads, "reader_buffer_size", reader_buffer_size, + "num_writer_threads", num_writer_threads, "writer_buffer_size", + writer_buffer_size, "shuffle_on_read", shuffle_on_read, "seed", seed, + "seed2", seed2, "mode", mode, "snapshot_name", snapshot_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return snapshot_dataset_eager_fallback( + input_dataset, path, output_types=output_types, + output_shapes=output_shapes, compression=compression, + reader_path_prefix=reader_path_prefix, + 
writer_path_prefix=writer_path_prefix, + shard_size_bytes=shard_size_bytes, + pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds, + num_reader_threads=num_reader_threads, + reader_buffer_size=reader_buffer_size, + num_writer_threads=num_writer_threads, + writer_buffer_size=writer_buffer_size, + shuffle_on_read=shuffle_on_read, seed=seed, seed2=seed2, mode=mode, + snapshot_name=snapshot_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if reader_path_prefix is None: + reader_path_prefix = "" + reader_path_prefix = _execute.make_str(reader_path_prefix, "reader_path_prefix") + if writer_path_prefix is None: + writer_path_prefix = "" + writer_path_prefix = _execute.make_str(writer_path_prefix, "writer_path_prefix") + if shard_size_bytes is None: + shard_size_bytes = 10737418240 + shard_size_bytes = _execute.make_int(shard_size_bytes, "shard_size_bytes") + if pending_snapshot_expiry_seconds is None: + pending_snapshot_expiry_seconds = 86400 + pending_snapshot_expiry_seconds = _execute.make_int(pending_snapshot_expiry_seconds, "pending_snapshot_expiry_seconds") + if num_reader_threads is None: + num_reader_threads = 1 + num_reader_threads = _execute.make_int(num_reader_threads, "num_reader_threads") + if reader_buffer_size is None: + reader_buffer_size = 1 + 
reader_buffer_size = _execute.make_int(reader_buffer_size, "reader_buffer_size") + if num_writer_threads is None: + num_writer_threads = 1 + num_writer_threads = _execute.make_int(num_writer_threads, "num_writer_threads") + if writer_buffer_size is None: + writer_buffer_size = 1 + writer_buffer_size = _execute.make_int(writer_buffer_size, "writer_buffer_size") + if shuffle_on_read is None: + shuffle_on_read = False + shuffle_on_read = _execute.make_bool(shuffle_on_read, "shuffle_on_read") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if mode is None: + mode = "auto" + mode = _execute.make_str(mode, "mode") + if snapshot_name is None: + snapshot_name = "" + snapshot_name = _execute.make_str(snapshot_name, "snapshot_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SnapshotDataset", input_dataset=input_dataset, path=path, + output_types=output_types, + output_shapes=output_shapes, + compression=compression, + reader_path_prefix=reader_path_prefix, + writer_path_prefix=writer_path_prefix, + shard_size_bytes=shard_size_bytes, + pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds, + num_reader_threads=num_reader_threads, + reader_buffer_size=reader_buffer_size, + num_writer_threads=num_writer_threads, + writer_buffer_size=writer_buffer_size, + shuffle_on_read=shuffle_on_read, seed=seed, + seed2=seed2, mode=mode, + snapshot_name=snapshot_name, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "compression", + _op.get_attr("compression"), "reader_path_prefix", + _op.get_attr("reader_path_prefix"), "writer_path_prefix", + _op.get_attr("writer_path_prefix"), "shard_size_bytes", + _op._get_attr_int("shard_size_bytes"), + "pending_snapshot_expiry_seconds", + _op._get_attr_int("pending_snapshot_expiry_seconds"), + 
"num_reader_threads", _op._get_attr_int("num_reader_threads"), + "reader_buffer_size", _op._get_attr_int("reader_buffer_size"), + "num_writer_threads", _op._get_attr_int("num_writer_threads"), + "writer_buffer_size", _op._get_attr_int("writer_buffer_size"), + "shuffle_on_read", _op._get_attr_bool("shuffle_on_read"), + "seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "mode", _op.get_attr("mode"), + "snapshot_name", _op.get_attr("snapshot_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SnapshotDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SnapshotDataset = tf_export("raw_ops.SnapshotDataset")(_ops.to_raw_op(snapshot_dataset)) + + +def snapshot_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], output_types, output_shapes, compression: str, reader_path_prefix: str, writer_path_prefix: str, shard_size_bytes: int, pending_snapshot_expiry_seconds: int, num_reader_threads: int, reader_buffer_size: int, num_writer_threads: int, writer_buffer_size: int, shuffle_on_read: bool, seed: int, seed2: int, mode: str, snapshot_name: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if reader_path_prefix is None: + reader_path_prefix = "" + reader_path_prefix = _execute.make_str(reader_path_prefix, "reader_path_prefix") + if writer_path_prefix is None: + writer_path_prefix = "" + writer_path_prefix = _execute.make_str(writer_path_prefix, "writer_path_prefix") + if shard_size_bytes is None: + shard_size_bytes = 10737418240 + shard_size_bytes = _execute.make_int(shard_size_bytes, "shard_size_bytes") + if pending_snapshot_expiry_seconds is None: + pending_snapshot_expiry_seconds = 86400 + pending_snapshot_expiry_seconds = _execute.make_int(pending_snapshot_expiry_seconds, "pending_snapshot_expiry_seconds") + if num_reader_threads is None: + num_reader_threads = 1 + num_reader_threads = _execute.make_int(num_reader_threads, "num_reader_threads") + if reader_buffer_size is None: + reader_buffer_size = 1 + reader_buffer_size = _execute.make_int(reader_buffer_size, "reader_buffer_size") + if num_writer_threads is None: + num_writer_threads = 1 + num_writer_threads = _execute.make_int(num_writer_threads, "num_writer_threads") + if writer_buffer_size is None: + writer_buffer_size = 1 + writer_buffer_size = _execute.make_int(writer_buffer_size, "writer_buffer_size") + if shuffle_on_read is None: + shuffle_on_read = False + shuffle_on_read = _execute.make_bool(shuffle_on_read, "shuffle_on_read") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if mode is None: + mode = "auto" + mode = _execute.make_str(mode, "mode") + if snapshot_name is None: + snapshot_name = "" + snapshot_name = _execute.make_str(snapshot_name, "snapshot_name") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + path = _ops.convert_to_tensor(path, 
_dtypes.string) + _inputs_flat = [input_dataset, path] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "reader_path_prefix", reader_path_prefix, + "writer_path_prefix", writer_path_prefix, "shard_size_bytes", + shard_size_bytes, "pending_snapshot_expiry_seconds", + pending_snapshot_expiry_seconds, "num_reader_threads", num_reader_threads, + "reader_buffer_size", reader_buffer_size, "num_writer_threads", + num_writer_threads, "writer_buffer_size", writer_buffer_size, + "shuffle_on_read", shuffle_on_read, "seed", seed, "seed2", seed2, "mode", + mode, "snapshot_name", snapshot_name) + _result = _execute.execute(b"SnapshotDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SnapshotDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def snapshot_dataset_reader(shard_dir: Annotated[Any, _atypes.String], start_index: Annotated[Any, _atypes.Int64], output_types, output_shapes, version: int, compression:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""TODO: add doc. + + Args: + shard_dir: A `Tensor` of type `string`. + start_index: A `Tensor` of type `int64`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + version: An `int`. + compression: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SnapshotDatasetReader", name, shard_dir, start_index, + "output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "version", version) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return snapshot_dataset_reader_eager_fallback( + shard_dir, start_index, output_types=output_types, + output_shapes=output_shapes, compression=compression, + version=version, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset_reader' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset_reader' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + version = _execute.make_int(version, "version") + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SnapshotDatasetReader", shard_dir=shard_dir, start_index=start_index, + output_types=output_types, + output_shapes=output_shapes, version=version, + compression=compression, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "compression", + _op.get_attr("compression"), "version", + _op._get_attr_int("version")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SnapshotDatasetReader", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SnapshotDatasetReader = tf_export("raw_ops.SnapshotDatasetReader")(_ops.to_raw_op(snapshot_dataset_reader)) + + +def snapshot_dataset_reader_eager_fallback(shard_dir: Annotated[Any, _atypes.String], start_index: Annotated[Any, _atypes.Int64], output_types, output_shapes, version: int, compression: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset_reader' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset_reader' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + version = _execute.make_int(version, "version") + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + shard_dir = _ops.convert_to_tensor(shard_dir, _dtypes.string) + start_index = _ops.convert_to_tensor(start_index, _dtypes.int64) + _inputs_flat = [shard_dir, start_index] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "version", version) + _result = _execute.execute(b"SnapshotDatasetReader", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SnapshotDatasetReader", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def snapshot_dataset_v2(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], reader_func_other_args, shard_func_other_args, output_types, output_shapes, reader_func, shard_func, compression:str="", reader_prefix:str="", writer_prefix:str="", hash_valid:bool=False, hash:int=0, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that will write to / read from a snapshot. + + This dataset attempts to determine whether a valid snapshot exists at the + `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. + If not, it will run the preprocessing pipeline as usual, and write out a + snapshot of the data processed for future use. + + Args: + input_dataset: A `Tensor` of type `variant`. + A variant tensor representing the input dataset. + path: A `Tensor` of type `string`. + The path we should write snapshots to / read snapshots from. + reader_func_other_args: A list of `Tensor` objects. + shard_func_other_args: A list of `Tensor` objects. + output_types: A list of `tf.DTypes` that has length `>= 1`. 
+ output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + reader_func: A function decorated with @Defun. + Optional. A function to control how to read data from snapshot shards. + shard_func: A function decorated with @Defun. + Optional. A function to control how to shard data when writing a snapshot. + compression: An optional `string`. Defaults to `""`. + The type of compression to be applied to the saved snapshot files. + reader_prefix: An optional `string`. Defaults to `""`. + writer_prefix: An optional `string`. Defaults to `""`. + hash_valid: An optional `bool`. Defaults to `False`. + hash: An optional `int`. Defaults to `0`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SnapshotDatasetV2", name, input_dataset, path, + reader_func_other_args, shard_func_other_args, "output_types", + output_types, "output_shapes", output_shapes, "compression", + compression, "reader_prefix", reader_prefix, "writer_prefix", + writer_prefix, "hash_valid", hash_valid, "hash", hash, "reader_func", + reader_func, "shard_func", shard_func, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return snapshot_dataset_v2_eager_fallback( + input_dataset, path, reader_func_other_args, shard_func_other_args, + output_types=output_types, output_shapes=output_shapes, + compression=compression, reader_prefix=reader_prefix, + writer_prefix=writer_prefix, hash_valid=hash_valid, hash=hash, + reader_func=reader_func, shard_func=shard_func, metadata=metadata, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset_v2' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if reader_prefix is None: + reader_prefix = "" + reader_prefix = _execute.make_str(reader_prefix, "reader_prefix") + if writer_prefix is None: + writer_prefix = "" + writer_prefix = _execute.make_str(writer_prefix, "writer_prefix") + if hash_valid is None: + hash_valid = False + hash_valid = _execute.make_bool(hash_valid, "hash_valid") + if hash is None: + hash = 0 + hash = _execute.make_int(hash, "hash") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SnapshotDatasetV2", input_dataset=input_dataset, path=path, + reader_func_other_args=reader_func_other_args, + shard_func_other_args=shard_func_other_args, + output_types=output_types, + output_shapes=output_shapes, + reader_func=reader_func, shard_func=shard_func, + compression=compression, + reader_prefix=reader_prefix, + writer_prefix=writer_prefix, + hash_valid=hash_valid, hash=hash, + metadata=metadata, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "compression", + _op.get_attr("compression"), "reader_prefix", + _op.get_attr("reader_prefix"), "writer_prefix", + _op.get_attr("writer_prefix"), "hash_valid", + 
_op._get_attr_bool("hash_valid"), "hash", + _op._get_attr_int("hash"), "reader_func", + _op.get_attr("reader_func"), "shard_func", + _op.get_attr("shard_func"), "Treader_func_args", + _op.get_attr("Treader_func_args"), "Tshard_func_args", + _op.get_attr("Tshard_func_args"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SnapshotDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SnapshotDatasetV2 = tf_export("raw_ops.SnapshotDatasetV2")(_ops.to_raw_op(snapshot_dataset_v2)) + + +def snapshot_dataset_v2_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], path: Annotated[Any, _atypes.String], reader_func_other_args, shard_func_other_args, output_types, output_shapes, reader_func, shard_func, compression: str, reader_prefix: str, writer_prefix: str, hash_valid: bool, hash: int, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_dataset_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_dataset_v2' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if compression is None: + compression = "" + compression = _execute.make_str(compression, "compression") + if reader_prefix is None: + reader_prefix = "" + reader_prefix = _execute.make_str(reader_prefix, "reader_prefix") + if writer_prefix is None: + writer_prefix = "" + writer_prefix = _execute.make_str(writer_prefix, "writer_prefix") + if hash_valid is None: + hash_valid = False + hash_valid = _execute.make_bool(hash_valid, "hash_valid") + if hash is None: + hash = 0 + hash = _execute.make_int(hash, "hash") + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Treader_func_args, reader_func_other_args = _execute.convert_to_mixed_eager_tensors(reader_func_other_args, ctx) + _attr_Tshard_func_args, shard_func_other_args = _execute.convert_to_mixed_eager_tensors(shard_func_other_args, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + path = _ops.convert_to_tensor(path, _dtypes.string) + _inputs_flat = [input_dataset, path] + list(reader_func_other_args) + list(shard_func_other_args) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "compression", compression, "reader_prefix", reader_prefix, "writer_prefix", + writer_prefix, "hash_valid", hash_valid, "hash", hash, "reader_func", + reader_func, "shard_func", shard_func, "Treader_func_args", + _attr_Treader_func_args, "Tshard_func_args", _attr_Tshard_func_args, + "metadata", metadata) + _result = _execute.execute(b"SnapshotDatasetV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SnapshotDatasetV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def snapshot_nested_dataset_reader(inputs: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + 
r"""TODO: add doc. + + Args: + inputs: A list of at least 1 `Tensor` objects with type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SnapshotNestedDatasetReader", name, inputs, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return snapshot_nested_dataset_reader_eager_fallback( + inputs, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." % inputs) + _attr_N = len(inputs) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SnapshotNestedDatasetReader", inputs=inputs, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "N", _op._get_attr_int("N")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SnapshotNestedDatasetReader", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SnapshotNestedDatasetReader = tf_export("raw_ops.SnapshotNestedDatasetReader")(_ops.to_raw_op(snapshot_nested_dataset_reader)) + + +def snapshot_nested_dataset_reader_eager_fallback(inputs: Annotated[List[Any], _atypes.Variant], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." % inputs) + _attr_N = len(inputs) + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'snapshot_nested_dataset_reader' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + inputs = _ops.convert_n_to_tensor(inputs, _dtypes.variant) + _inputs_flat = list(inputs) + _attrs = ("output_types", output_types, "output_shapes", output_shapes, "N", + _attr_N) + _result = _execute.execute(b"SnapshotNestedDatasetReader", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SnapshotNestedDatasetReader", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sql_dataset(driver_name: Annotated[Any, _atypes.String], data_source_name: Annotated[Any, _atypes.String], query: Annotated[Any, _atypes.String], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that executes a SQL query and emits rows of the result set. + + Args: + driver_name: A `Tensor` of type `string`. + The database type. Currently, the only supported type is 'sqlite'. + data_source_name: A `Tensor` of type `string`. + A connection string to connect to the database. + query: A `Tensor` of type `string`. A SQL query to execute. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SqlDataset", name, driver_name, data_source_name, query, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sql_dataset_eager_fallback( + driver_name, data_source_name, query, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sql_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sql_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SqlDataset", driver_name=driver_name, + data_source_name=data_source_name, query=query, + output_types=output_types, output_shapes=output_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SqlDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SqlDataset = tf_export("raw_ops.SqlDataset")(_ops.to_raw_op(sql_dataset)) + + +def sql_dataset_eager_fallback(driver_name: Annotated[Any, _atypes.String], data_source_name: Annotated[Any, _atypes.String], query: Annotated[Any, _atypes.String], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'sql_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'sql_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + driver_name = _ops.convert_to_tensor(driver_name, _dtypes.string) + data_source_name = _ops.convert_to_tensor(data_source_name, _dtypes.string) + query = _ops.convert_to_tensor(query, _dtypes.string) + _inputs_flat = [driver_name, data_source_name, query] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"SqlDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SqlDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def stats_aggregator_handle(container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a statistics manager resource. + + Args: + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatsAggregatorHandle", name, "container", container, + "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stats_aggregator_handle_eager_fallback( + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatsAggregatorHandle", container=container, shared_name=shared_name, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatsAggregatorHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatsAggregatorHandle = tf_export("raw_ops.StatsAggregatorHandle")(_ops.to_raw_op(stats_aggregator_handle)) + + +def stats_aggregator_handle_eager_fallback(container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("container", container, "shared_name", shared_name) + _result = _execute.execute(b"StatsAggregatorHandle", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatsAggregatorHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def stats_aggregator_handle_v2(container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatsAggregatorHandleV2", name, "container", container, + "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stats_aggregator_handle_v2_eager_fallback( + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatsAggregatorHandleV2", container=container, + shared_name=shared_name, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatsAggregatorHandleV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatsAggregatorHandleV2 = tf_export("raw_ops.StatsAggregatorHandleV2")(_ops.to_raw_op(stats_aggregator_handle_v2)) + + +def stats_aggregator_handle_v2_eager_fallback(container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("container", container, "shared_name", shared_name) + _result = _execute.execute(b"StatsAggregatorHandleV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "StatsAggregatorHandleV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def stats_aggregator_set_summary_writer(stats_aggregator: Annotated[Any, _atypes.Resource], summary: Annotated[Any, _atypes.Resource], name=None): + r"""Set a summary_writer_interface to record statistics using given stats_aggregator. + + Args: + stats_aggregator: A `Tensor` of type `resource`. + summary: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatsAggregatorSetSummaryWriter", name, stats_aggregator, + summary) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stats_aggregator_set_summary_writer_eager_fallback( + stats_aggregator, summary, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatsAggregatorSetSummaryWriter", stats_aggregator=stats_aggregator, + summary=summary, name=name) + return _op +StatsAggregatorSetSummaryWriter = tf_export("raw_ops.StatsAggregatorSetSummaryWriter")(_ops.to_raw_op(stats_aggregator_set_summary_writer)) + + +def stats_aggregator_set_summary_writer_eager_fallback(stats_aggregator: Annotated[Any, _atypes.Resource], summary: Annotated[Any, _atypes.Resource], name, ctx): + stats_aggregator = _ops.convert_to_tensor(stats_aggregator, _dtypes.resource) + summary = _ops.convert_to_tensor(summary, _dtypes.resource) + _inputs_flat = [stats_aggregator, summary] + _attrs = None + _result = _execute.execute(b"StatsAggregatorSetSummaryWriter", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def stats_aggregator_summary(iterator: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.String]: + r"""Produces a summary of any statistics recorded by the given statistics manager. + + Args: + iterator: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatsAggregatorSummary", name, iterator) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stats_aggregator_summary_eager_fallback( + iterator, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatsAggregatorSummary", iterator=iterator, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatsAggregatorSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatsAggregatorSummary = tf_export("raw_ops.StatsAggregatorSummary")(_ops.to_raw_op(stats_aggregator_summary)) + + +def stats_aggregator_summary_eager_fallback(iterator: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.String]: + iterator = _ops.convert_to_tensor(iterator, _dtypes.resource) + _inputs_flat = [iterator] + _attrs = None + _result = _execute.execute(b"StatsAggregatorSummary", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatsAggregatorSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def take_while_dataset(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that stops iteration when predicate` is false. + + The `predicate` function must return a scalar boolean and accept the + following arguments: + + * One tensor for each component of an element of `input_dataset`. + * One tensor for each value in `other_arguments`. + + Args: + input_dataset: A `Tensor` of type `variant`. + other_arguments: A list of `Tensor` objects. + A list of tensors, typically values that were captured when + building a closure for `predicate`. + predicate: A function decorated with @Defun. + A function returning a scalar boolean. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TakeWhileDataset", name, input_dataset, other_arguments, + "predicate", predicate, "output_types", output_types, "output_shapes", + output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return take_while_dataset_eager_fallback( + input_dataset, other_arguments, predicate=predicate, + output_types=output_types, output_shapes=output_shapes, + metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'take_while_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'take_while_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TakeWhileDataset", input_dataset=input_dataset, + other_arguments=other_arguments, + predicate=predicate, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("predicate", _op.get_attr("predicate"), "Targuments", + _op.get_attr("Targuments"), "output_types", + _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TakeWhileDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TakeWhileDataset = tf_export("raw_ops.TakeWhileDataset")(_ops.to_raw_op(take_while_dataset)) + + +def take_while_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], other_arguments, predicate, output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'take_while_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'take_while_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _attr_Targuments, other_arguments = _execute.convert_to_mixed_eager_tensors(other_arguments, ctx) + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + list(other_arguments) + _attrs = ("predicate", predicate, "Targuments", _attr_Targuments, + "output_types", output_types, "output_shapes", output_shapes, "metadata", + metadata) + _result = _execute.execute(b"TakeWhileDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TakeWhileDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def thread_pool_dataset(input_dataset: Annotated[Any, _atypes.Variant], thread_pool: Annotated[Any, _atypes.Resource], output_types, output_shapes, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + thread_pool: A `Tensor` of type `resource`. + A resource produced by the ThreadPoolHandle op. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ThreadPoolDataset", name, input_dataset, thread_pool, + "output_types", output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return thread_pool_dataset_eager_fallback( + input_dataset, thread_pool, output_types=output_types, + output_shapes=output_shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ThreadPoolDataset", input_dataset=input_dataset, + thread_pool=thread_pool, + output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ThreadPoolDataset = tf_export("raw_ops.ThreadPoolDataset")(_ops.to_raw_op(thread_pool_dataset)) + + +def thread_pool_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], thread_pool: Annotated[Any, _atypes.Resource], output_types, output_shapes, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'thread_pool_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'thread_pool_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + thread_pool = _ops.convert_to_tensor(thread_pool, _dtypes.resource) + _inputs_flat = [input_dataset, thread_pool] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"ThreadPoolDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ThreadPoolDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def thread_pool_handle(num_threads: int, display_name: str, max_intra_op_parallelism:int=1, container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a dataset that uses a custom thread pool to compute `input_dataset`. + + Args: + num_threads: An `int`. The number of threads in the thread pool. + display_name: A `string`. + A human-readable name for the threads that may be visible in some + visualizations. + threadpool. + max_intra_op_parallelism: An optional `int`. Defaults to `1`. + The maximum degree of parallelism to use within operations that execute on this + threadpool. + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ThreadPoolHandle", name, "num_threads", num_threads, + "max_intra_op_parallelism", max_intra_op_parallelism, "display_name", + display_name, "container", container, "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return thread_pool_handle_eager_fallback( + num_threads=num_threads, + max_intra_op_parallelism=max_intra_op_parallelism, + display_name=display_name, container=container, + shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_threads = _execute.make_int(num_threads, "num_threads") + display_name = _execute.make_str(display_name, "display_name") + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ThreadPoolHandle", num_threads=num_threads, + display_name=display_name, + max_intra_op_parallelism=max_intra_op_parallelism, + container=container, shared_name=shared_name, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_threads", _op._get_attr_int("num_threads"), + "max_intra_op_parallelism", + _op._get_attr_int("max_intra_op_parallelism"), "display_name", + _op.get_attr("display_name"), "container", + _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + 
"ThreadPoolHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ThreadPoolHandle = tf_export("raw_ops.ThreadPoolHandle")(_ops.to_raw_op(thread_pool_handle)) + + +def thread_pool_handle_eager_fallback(num_threads: int, display_name: str, max_intra_op_parallelism: int, container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + num_threads = _execute.make_int(num_threads, "num_threads") + display_name = _execute.make_str(display_name, "display_name") + if max_intra_op_parallelism is None: + max_intra_op_parallelism = 1 + max_intra_op_parallelism = _execute.make_int(max_intra_op_parallelism, "max_intra_op_parallelism") + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("num_threads", num_threads, "max_intra_op_parallelism", + max_intra_op_parallelism, "display_name", display_name, "container", + container, "shared_name", shared_name) + _result = _execute.execute(b"ThreadPoolHandle", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ThreadPoolHandle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def unbatch_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""A dataset that splits the elements of its input into multiple elements. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UnbatchDataset", name, input_dataset, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unbatch_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'unbatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'unbatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UnbatchDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UnbatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UnbatchDataset = tf_export("raw_ops.UnbatchDataset")(_ops.to_raw_op(unbatch_dataset)) + + +def unbatch_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'unbatch_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'unbatch_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"UnbatchDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UnbatchDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def uncompress_element(compressed: Annotated[Any, _atypes.Variant], output_types, output_shapes, name=None): + r"""Uncompresses a compressed dataset element. + + Args: + compressed: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UncompressElement", name, compressed, "output_types", + output_types, "output_shapes", output_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return uncompress_element_eager_fallback( + compressed, output_types=output_types, output_shapes=output_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'uncompress_element' Op, not %r." 
% output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'uncompress_element' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UncompressElement", compressed=compressed, output_types=output_types, + output_shapes=output_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UncompressElement", _inputs_flat, _attrs, _result) + return _result + +UncompressElement = tf_export("raw_ops.UncompressElement")(_ops.to_raw_op(uncompress_element)) + + +def uncompress_element_eager_fallback(compressed: Annotated[Any, _atypes.Variant], output_types, output_shapes, name, ctx): + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'uncompress_element' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'uncompress_element' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + compressed = _ops.convert_to_tensor(compressed, _dtypes.variant) + _inputs_flat = [compressed] + _attrs = ("output_types", output_types, "output_shapes", output_shapes) + _result = _execute.execute(b"UncompressElement", len(output_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UncompressElement", _inputs_flat, _attrs, _result) + return _result + + +def unique_dataset(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata:str="", name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates a dataset that contains the unique elements of `input_dataset`. + + Args: + input_dataset: A `Tensor` of type `variant`. + output_types: A list of `tf.DTypes` that has length `>= 1`. + output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`. + metadata: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "UniqueDataset", name, input_dataset, "output_types", + output_types, "output_shapes", output_shapes, "metadata", metadata) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return unique_dataset_eager_fallback( + input_dataset, output_types=output_types, + output_shapes=output_shapes, metadata=metadata, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'unique_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'unique_dataset' Op, not %r." % output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "UniqueDataset", input_dataset=input_dataset, + output_types=output_types, + output_shapes=output_shapes, metadata=metadata, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("output_types", _op.get_attr("output_types"), "output_shapes", + _op.get_attr("output_shapes"), "metadata", + _op.get_attr("metadata")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "UniqueDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +UniqueDataset = tf_export("raw_ops.UniqueDataset")(_ops.to_raw_op(unique_dataset)) + + +def unique_dataset_eager_fallback(input_dataset: Annotated[Any, _atypes.Variant], output_types, output_shapes, metadata: str, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'unique_dataset' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if not isinstance(output_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'output_shapes' argument to " + "'unique_dataset' Op, not %r." 
% output_shapes) + output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes] + if metadata is None: + metadata = "" + metadata = _execute.make_str(metadata, "metadata") + input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant) + _inputs_flat = [input_dataset] + _attrs = ("output_types", output_types, "output_shapes", output_shapes, + "metadata", metadata) + _result = _execute.execute(b"UniqueDataset", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "UniqueDataset", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_parsing_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_parsing_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..89ad75abddf8b84c4ff623c2f8d8e89c085cb2aa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_parsing_ops.py @@ -0,0 +1,2352 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def decode_csv(records: Annotated[Any, _atypes.String], record_defaults, field_delim:str=",", use_quote_delim:bool=True, na_value:str="", select_cols=[], name=None): + r"""Convert CSV records to tensors. Each column maps to one tensor. + + RFC 4180 format is expected for the CSV records. + (https://tools.ietf.org/html/rfc4180) + Note that we allow leading and trailing spaces with int or float field. + + Args: + records: A `Tensor` of type `string`. + Each string is a record/row in the csv and all records should have + the same format. + record_defaults: A list of `Tensor` objects with types from: `float32`, `float64`, `int32`, `int64`, `string`. + One tensor per column of the input record, with either a + scalar default value for that column or an empty vector if the column is + required. + field_delim: An optional `string`. Defaults to `","`. + char delimiter to separate fields in a record. + use_quote_delim: An optional `bool`. Defaults to `True`. + If false, treats double quotation marks as regular + characters inside of the string fields (ignoring RFC 4180, Section 2, + Bullet 5). + na_value: An optional `string`. 
Defaults to `""`. + Additional string to recognize as NA/NaN. + select_cols: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects. Has the same type as `record_defaults`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeCSV", name, records, record_defaults, "field_delim", + field_delim, "use_quote_delim", use_quote_delim, "na_value", na_value, + "select_cols", select_cols) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return decode_csv_eager_fallback( + records, record_defaults, field_delim=field_delim, + use_quote_delim=use_quote_delim, na_value=na_value, + select_cols=select_cols, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if field_delim is None: + field_delim = "," + field_delim = _execute.make_str(field_delim, "field_delim") + if use_quote_delim is None: + use_quote_delim = True + use_quote_delim = _execute.make_bool(use_quote_delim, "use_quote_delim") + if na_value is None: + na_value = "" + na_value = _execute.make_str(na_value, "na_value") + if select_cols is None: + select_cols = [] + if not isinstance(select_cols, (list, tuple)): + raise TypeError( + "Expected list for 'select_cols' argument to " + "'decode_csv' Op, not %r." 
% select_cols) + select_cols = [_execute.make_int(_i, "select_cols") for _i in select_cols] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeCSV", records=records, record_defaults=record_defaults, + field_delim=field_delim, use_quote_delim=use_quote_delim, + na_value=na_value, select_cols=select_cols, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("OUT_TYPE", _op.get_attr("OUT_TYPE"), "field_delim", + _op.get_attr("field_delim"), "use_quote_delim", + _op._get_attr_bool("use_quote_delim"), "na_value", + _op.get_attr("na_value"), "select_cols", + _op.get_attr("select_cols")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeCSV", _inputs_flat, _attrs, _result) + return _result + +DecodeCSV = tf_export("raw_ops.DecodeCSV")(_ops.to_raw_op(decode_csv)) + + +def decode_csv_eager_fallback(records: Annotated[Any, _atypes.String], record_defaults, field_delim: str, use_quote_delim: bool, na_value: str, select_cols, name, ctx): + if field_delim is None: + field_delim = "," + field_delim = _execute.make_str(field_delim, "field_delim") + if use_quote_delim is None: + use_quote_delim = True + use_quote_delim = _execute.make_bool(use_quote_delim, "use_quote_delim") + if na_value is None: + na_value = "" + na_value = _execute.make_str(na_value, "na_value") + if select_cols is None: + select_cols = [] + if not isinstance(select_cols, (list, tuple)): + raise TypeError( + "Expected list for 'select_cols' argument to " + "'decode_csv' Op, not %r." 
% select_cols) + select_cols = [_execute.make_int(_i, "select_cols") for _i in select_cols] + _attr_OUT_TYPE, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, ctx) + records = _ops.convert_to_tensor(records, _dtypes.string) + _inputs_flat = [records] + list(record_defaults) + _attrs = ("OUT_TYPE", _attr_OUT_TYPE, "field_delim", field_delim, + "use_quote_delim", use_quote_delim, "na_value", na_value, "select_cols", + select_cols) + _result = _execute.execute(b"DecodeCSV", len(record_defaults), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodeCSV", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.decode_compressed', v1=['io.decode_compressed', 'decode_compressed']) +@deprecated_endpoints('decode_compressed') +def decode_compressed(bytes: Annotated[Any, _atypes.String], compression_type:str="", name=None) -> Annotated[Any, _atypes.String]: + r"""Decompress strings. + + This op decompresses each element of the `bytes` input `Tensor`, which + is assumed to be compressed using the given `compression_type`. + + The `output` is a string `Tensor` of the same shape as `bytes`, + each element containing the decompressed data from the corresponding + element in `bytes`. + + Args: + bytes: A `Tensor` of type `string`. + A Tensor of string which is compressed. + compression_type: An optional `string`. Defaults to `""`. + A scalar containing either (i) the empty string (no + compression), (ii) "ZLIB", or (iii) "GZIP". + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeCompressed", name, bytes, "compression_type", + compression_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_decode_compressed( + (bytes, compression_type, name,), None) + if _result is not NotImplemented: + return _result + return decode_compressed_eager_fallback( + bytes, compression_type=compression_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_compressed, (), dict(bytes=bytes, + compression_type=compression_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_decode_compressed( + (bytes, compression_type, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if compression_type is None: + compression_type = "" + compression_type = _execute.make_str(compression_type, "compression_type") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeCompressed", bytes=bytes, compression_type=compression_type, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_compressed, (), dict(bytes=bytes, + compression_type=compression_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("compression_type", _op.get_attr("compression_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeCompressed", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DecodeCompressed = tf_export("raw_ops.DecodeCompressed")(_ops.to_raw_op(decode_compressed)) +_dispatcher_for_decode_compressed = decode_compressed._tf_type_based_dispatcher.Dispatch + + +def decode_compressed_eager_fallback(bytes: Annotated[Any, _atypes.String], compression_type: str, name, ctx) -> Annotated[Any, _atypes.String]: + if compression_type is None: + compression_type = "" + compression_type = _execute.make_str(compression_type, "compression_type") + bytes = _ops.convert_to_tensor(bytes, _dtypes.string) + _inputs_flat = [bytes] + _attrs = ("compression_type", compression_type) + _result = _execute.execute(b"DecodeCompressed", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodeCompressed", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def decode_json_example(json_examples: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.String]: + r"""Convert JSON-encoded Example records to binary protocol buffer strings. + + + Note: This is **not** a general purpose JSON parsing op. 
+ + This op converts JSON-serialized + `tf.train.Example` (created with `json_format.MessageToJson`, following the + [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json)) + to a binary-serialized `tf.train.Example` (equivalent to + `Example.SerializeToString()`) suitable for conversion to tensors with + `tf.io.parse_example`. + + Args: + json_examples: A `Tensor` of type `string`. + Each string is a JSON object serialized according to the JSON + mapping of the Example proto. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeJSONExample", name, json_examples) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return decode_json_example_eager_fallback( + json_examples, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeJSONExample", json_examples=json_examples, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeJSONExample", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DecodeJSONExample = tf_export("raw_ops.DecodeJSONExample")(_ops.to_raw_op(decode_json_example)) + + +def decode_json_example_eager_fallback(json_examples: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.String]: + json_examples = _ops.convert_to_tensor(json_examples, _dtypes.string) + _inputs_flat = [json_examples] + _attrs = None + _result = _execute.execute(b"DecodeJSONExample", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodeJSONExample", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_DecodePaddedRaw_out_type = TypeVar("TV_DecodePaddedRaw_out_type", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt8) + +def decode_padded_raw(input_bytes: Annotated[Any, _atypes.String], fixed_length: Annotated[Any, _atypes.Int32], out_type: TV_DecodePaddedRaw_out_type, little_endian:bool=True, name=None) -> Annotated[Any, TV_DecodePaddedRaw_out_type]: + r"""Reinterpret the bytes of a string as a vector of numbers. + + Args: + input_bytes: A `Tensor` of type `string`. Tensor of string to be decoded. + fixed_length: A `Tensor` of type `int32`. + Length in bytes for each element of the decoded output. Must be a multiple + of the size of the output type. + out_type: A `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.uint16, tf.uint8, tf.int16, tf.int8, tf.int64, tf.bfloat16`. + little_endian: An optional `bool`. Defaults to `True`. 
+ Whether the input `input_bytes` is in little-endian order. Ignored for + `out_type` values that are stored in a single byte, like `uint8` + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `out_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodePaddedRaw", name, input_bytes, fixed_length, "out_type", + out_type, "little_endian", little_endian) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return decode_padded_raw_eager_fallback( + input_bytes, fixed_length, out_type=out_type, + little_endian=little_endian, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + out_type = _execute.make_type(out_type, "out_type") + if little_endian is None: + little_endian = True + little_endian = _execute.make_bool(little_endian, "little_endian") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodePaddedRaw", input_bytes=input_bytes, fixed_length=fixed_length, + out_type=out_type, little_endian=little_endian, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("out_type", _op._get_attr_type("out_type"), "little_endian", + _op._get_attr_bool("little_endian")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodePaddedRaw", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DecodePaddedRaw = tf_export("raw_ops.DecodePaddedRaw")(_ops.to_raw_op(decode_padded_raw)) + + +def decode_padded_raw_eager_fallback(input_bytes: Annotated[Any, _atypes.String], fixed_length: Annotated[Any, _atypes.Int32], out_type: TV_DecodePaddedRaw_out_type, little_endian: bool, name, ctx) -> Annotated[Any, TV_DecodePaddedRaw_out_type]: + out_type = _execute.make_type(out_type, 
"out_type") + if little_endian is None: + little_endian = True + little_endian = _execute.make_bool(little_endian, "little_endian") + input_bytes = _ops.convert_to_tensor(input_bytes, _dtypes.string) + fixed_length = _ops.convert_to_tensor(fixed_length, _dtypes.int32) + _inputs_flat = [input_bytes, fixed_length] + _attrs = ("out_type", out_type, "little_endian", little_endian) + _result = _execute.execute(b"DecodePaddedRaw", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodePaddedRaw", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_DecodeRaw_out_type = TypeVar("TV_DecodeRaw_out_type", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt8) + +def decode_raw(bytes: Annotated[Any, _atypes.String], out_type: TV_DecodeRaw_out_type, little_endian:bool=True, name=None) -> Annotated[Any, TV_DecodeRaw_out_type]: + r"""Reinterpret the bytes of a string as a vector of numbers. + + Args: + bytes: A `Tensor` of type `string`. + All the elements must have the same length. + out_type: A `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.uint16, tf.uint8, tf.int16, tf.int8, tf.int64, tf.complex64, tf.complex128, tf.bool, tf.bfloat16`. + little_endian: An optional `bool`. Defaults to `True`. + Whether the input `bytes` are in little-endian order. + Ignored for `out_type` values that are stored in a single byte like + `uint8`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `out_type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeRaw", name, bytes, "out_type", out_type, "little_endian", + little_endian) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return decode_raw_eager_fallback( + bytes, out_type=out_type, little_endian=little_endian, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + out_type = _execute.make_type(out_type, "out_type") + if little_endian is None: + little_endian = True + little_endian = _execute.make_bool(little_endian, "little_endian") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeRaw", bytes=bytes, out_type=out_type, + little_endian=little_endian, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("out_type", _op._get_attr_type("out_type"), "little_endian", + _op._get_attr_bool("little_endian")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeRaw", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DecodeRaw = tf_export("raw_ops.DecodeRaw")(_ops.to_raw_op(decode_raw)) + + +def decode_raw_eager_fallback(bytes: Annotated[Any, _atypes.String], out_type: TV_DecodeRaw_out_type, little_endian: bool, name, ctx) -> Annotated[Any, TV_DecodeRaw_out_type]: + out_type = _execute.make_type(out_type, "out_type") + if little_endian is None: + little_endian = True + little_endian = _execute.make_bool(little_endian, "little_endian") + bytes = _ops.convert_to_tensor(bytes, _dtypes.string) + _inputs_flat = [bytes] + _attrs = ("out_type", out_type, "little_endian", little_endian) + _result = _execute.execute(b"DecodeRaw", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + 
_execute.record_gradient( + "DecodeRaw", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_ParseExampleOutput = collections.namedtuple( + "ParseExample", + ["sparse_indices", "sparse_values", "sparse_shapes", "dense_values"]) + + +def parse_example(serialized: Annotated[Any, _atypes.String], names: Annotated[Any, _atypes.String], sparse_keys: Annotated[List[Any], _atypes.String], dense_keys: Annotated[List[Any], _atypes.String], dense_defaults, sparse_types, dense_shapes, name=None): + r"""Transforms a vector of brain.Example protos (as strings) into typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A vector containing a batch of binary serialized Example protos. + names: A `Tensor` of type `string`. + A vector containing the names of the serialized protos. + May contain, for example, table key (descriptive) names for the + corresponding serialized protos. These are purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty vector if no names are available. + If non-empty, this vector must be the same length as "serialized". + sparse_keys: A list of `Tensor` objects with type `string`. + A list of Nsparse string Tensors (scalars). + The keys expected in the Examples' features associated with sparse values. + dense_keys: A list of `Tensor` objects with type `string`. + A list of Ndense string Tensors (scalars). + The keys expected in the Examples' features associated with dense values. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Ndense Tensors (some may be empty). + dense_defaults[j] provides default values + when the example's feature_map lacks dense_key[j]. If an empty Tensor is + provided for dense_defaults[j], then the Feature dense_keys[j] is required. + The input type is inferred from dense_defaults[j], even when it's empty. 
+ If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + then the shape of dense_defaults[j] must match that of dense_shapes[j]. + If dense_shapes[j] has an undefined major dimension (variable strides dense + feature), dense_defaults[j] must contain a single element: + the padding element. + sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of Nsparse types; the data types of data in each Feature + given in sparse_keys. + Currently the ParseExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + A list of Ndense shapes; the shapes of data in each Feature + given in dense_keys. + The number of elements in the Feature corresponding to dense_key[j] + must always equal dense_shapes[j].NumEntries(). + If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + The dense outputs are just the inputs row-stacked by batch. + This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + the shape of the output Tensor dense_values[j] will be + (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + of elements of length D1 * .... * DN, across all minibatch entries + in the input. Any minibatch entry with less than M blocks of elements of + length D1 * ... * DN will be padded with the corresponding default_value + scalar element along the second dimension. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shapes, dense_values). + + sparse_indices: A list with the same length as `sparse_keys` of `Tensor` objects with type `int64`. + sparse_values: A list of `Tensor` objects of type `sparse_types`. + sparse_shapes: A list with the same length as `sparse_keys` of `Tensor` objects with type `int64`. + dense_values: A list of `Tensor` objects. 
Has the same type as `dense_defaults`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseExample", name, serialized, names, sparse_keys, + dense_keys, dense_defaults, "sparse_types", sparse_types, + "dense_shapes", dense_shapes) + _result = _ParseExampleOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_example_eager_fallback( + serialized, names, sparse_keys, dense_keys, dense_defaults, + sparse_types=sparse_types, dense_shapes=dense_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_example' Op, not %r." % sparse_keys) + _attr_Nsparse = len(sparse_keys) + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example' Op, not %r." % dense_keys) + _attr_Ndense = len(dense_keys) + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example' Op, not %r." 
% dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseExample", serialized=serialized, names=names, + sparse_keys=sparse_keys, dense_keys=dense_keys, + dense_defaults=dense_defaults, + sparse_types=sparse_types, dense_shapes=dense_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Nsparse", _op._get_attr_int("Nsparse"), "Ndense", + _op._get_attr_int("Ndense"), "sparse_types", + _op.get_attr("sparse_types"), "Tdense", _op.get_attr("Tdense"), + "dense_shapes", _op.get_attr("dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseExample", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_Nsparse]] + _result[_attr_Nsparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + _attr_Nsparse]] + _result[2 + _attr_Nsparse:] + _result = _result[:3] + [_result[3:]] + _result = _ParseExampleOutput._make(_result) + return _result + +ParseExample = tf_export("raw_ops.ParseExample")(_ops.to_raw_op(parse_example)) + + +def parse_example_eager_fallback(serialized: Annotated[Any, _atypes.String], names: Annotated[Any, _atypes.String], sparse_keys: Annotated[List[Any], _atypes.String], dense_keys: Annotated[List[Any], _atypes.String], dense_defaults, sparse_types, dense_shapes, name, ctx): + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_example' Op, not %r." % sparse_keys) + _attr_Nsparse = len(sparse_keys) + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_example' Op, not %r." 
% dense_keys) + _attr_Ndense = len(dense_keys) + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + names = _ops.convert_to_tensor(names, _dtypes.string) + sparse_keys = _ops.convert_n_to_tensor(sparse_keys, _dtypes.string) + dense_keys = _ops.convert_n_to_tensor(dense_keys, _dtypes.string) + _inputs_flat = [serialized, names] + list(sparse_keys) + list(dense_keys) + list(dense_defaults) + _attrs = ("Nsparse", _attr_Nsparse, "Ndense", _attr_Ndense, "sparse_types", + sparse_types, "Tdense", _attr_Tdense, "dense_shapes", dense_shapes) + _result = _execute.execute(b"ParseExample", _attr_Nsparse + + len(sparse_types) + _attr_Nsparse + + len(dense_defaults), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseExample", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_Nsparse]] + _result[_attr_Nsparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + _attr_Nsparse]] + _result[2 + _attr_Nsparse:] + _result = _result[:3] + [_result[3:]] + _result = _ParseExampleOutput._make(_result) + return _result + +_ParseExampleV2Output = collections.namedtuple( + "ParseExampleV2", + ["sparse_indices", "sparse_values", "sparse_shapes", "dense_values", "ragged_values", "ragged_row_splits"]) + + +def 
parse_example_v2(serialized: Annotated[Any, _atypes.String], names: Annotated[Any, _atypes.String], sparse_keys: Annotated[Any, _atypes.String], dense_keys: Annotated[Any, _atypes.String], ragged_keys: Annotated[Any, _atypes.String], dense_defaults, num_sparse: int, sparse_types, ragged_value_types, ragged_split_types, dense_shapes, name=None): + r"""Transforms a vector of tf.Example protos (as strings) into typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A scalar or vector containing binary serialized Example protos. + names: A `Tensor` of type `string`. + A tensor containing the names of the serialized protos. + Corresponds 1:1 with the `serialized` tensor. + May contain, for example, table key (descriptive) names for the + corresponding serialized protos. These are purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty vector if no names are available. + If non-empty, this tensor must have the same shape as "serialized". + sparse_keys: A `Tensor` of type `string`. Vector of strings. + The keys expected in the Examples' features associated with sparse values. + dense_keys: A `Tensor` of type `string`. Vector of strings. + The keys expected in the Examples' features associated with dense values. + ragged_keys: A `Tensor` of type `string`. Vector of strings. + The keys expected in the Examples' features associated with ragged values. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`. + dense_defaults[j] provides default values + when the example's feature_map lacks dense_key[j]. If an empty Tensor is + provided for dense_defaults[j], then the Feature dense_keys[j] is required. + The input type is inferred from dense_defaults[j], even when it's empty. 
+ If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + then the shape of dense_defaults[j] must match that of dense_shapes[j]. + If dense_shapes[j] has an undefined major dimension (variable strides dense + feature), dense_defaults[j] must contain a single element: + the padding element. + num_sparse: An `int` that is `>= 0`. The number of sparse keys. + sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `num_sparse` types; the data types of data in each Feature + given in sparse_keys. + Currently the ParseExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + ragged_value_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `num_ragged` types; the data types of data in each Feature + given in ragged_keys (where `num_ragged = sparse_keys.size()`). + Currently the ParseExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + ragged_split_types: A list of `tf.DTypes` from: `tf.int32, tf.int64`. + A list of `num_ragged` types; the data types of row_splits in each Feature + given in ragged_keys (where `num_ragged = sparse_keys.size()`). + May be DT_INT32 or DT_INT64. + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + A list of `num_dense` shapes; the shapes of data in each Feature + given in dense_keys (where `num_dense = dense_keys.size()`). + The number of elements in the Feature corresponding to dense_key[j] + must always equal dense_shapes[j].NumEntries(). + If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + The dense outputs are just the inputs row-stacked by batch. + This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + the shape of the output Tensor dense_values[j] will be + (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + of elements of length D1 * .... 
* DN, across all minibatch entries + in the input. Any minibatch entry with less than M blocks of elements of + length D1 * ... * DN will be padded with the corresponding default_value + scalar element along the second dimension. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits). + + sparse_indices: A list of `num_sparse` `Tensor` objects with type `int64`. + sparse_values: A list of `Tensor` objects of type `sparse_types`. + sparse_shapes: A list of `num_sparse` `Tensor` objects with type `int64`. + dense_values: A list of `Tensor` objects. Has the same type as `dense_defaults`. + ragged_values: A list of `Tensor` objects of type `ragged_value_types`. + ragged_row_splits: A list of `Tensor` objects of type `ragged_split_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseExampleV2", name, serialized, names, sparse_keys, + dense_keys, ragged_keys, dense_defaults, "num_sparse", num_sparse, + "sparse_types", sparse_types, "ragged_value_types", + ragged_value_types, "ragged_split_types", ragged_split_types, + "dense_shapes", dense_shapes) + _result = _ParseExampleV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_example_v2_eager_fallback( + serialized, names, sparse_keys, dense_keys, ragged_keys, + dense_defaults, num_sparse=num_sparse, sparse_types=sparse_types, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, dense_shapes=dense_shapes, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_sparse = _execute.make_int(num_sparse, "num_sparse") + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_v2' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_v2' Op, not %r." % ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_v2' Op, not %r." % ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_v2' Op, not %r." 
% dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseExampleV2", serialized=serialized, names=names, + sparse_keys=sparse_keys, dense_keys=dense_keys, + ragged_keys=ragged_keys, + dense_defaults=dense_defaults, + num_sparse=num_sparse, sparse_types=sparse_types, + ragged_value_types=ragged_value_types, + ragged_split_types=ragged_split_types, + dense_shapes=dense_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tdense", _op.get_attr("Tdense"), "num_sparse", + _op._get_attr_int("num_sparse"), "sparse_types", + _op.get_attr("sparse_types"), "ragged_value_types", + _op.get_attr("ragged_value_types"), "ragged_split_types", + _op.get_attr("ragged_split_types"), "dense_shapes", + _op.get_attr("dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseExampleV2", _inputs_flat, _attrs, _result) + _result = [_result[:num_sparse]] + _result[num_sparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + num_sparse]] + _result[2 + num_sparse:] + _result = _result[:3] + [_result[3:3 + len(dense_defaults)]] + _result[3 + len(dense_defaults):] + _result = _result[:4] + [_result[4:4 + len(ragged_value_types)]] + _result[4 + len(ragged_value_types):] + _result = _result[:5] + [_result[5:]] + _result = _ParseExampleV2Output._make(_result) + return _result + +ParseExampleV2 = tf_export("raw_ops.ParseExampleV2")(_ops.to_raw_op(parse_example_v2)) + + +def parse_example_v2_eager_fallback(serialized: Annotated[Any, _atypes.String], names: Annotated[Any, _atypes.String], sparse_keys: Annotated[Any, _atypes.String], dense_keys: Annotated[Any, _atypes.String], ragged_keys: Annotated[Any, _atypes.String], dense_defaults, num_sparse: int, sparse_types, ragged_value_types, ragged_split_types, dense_shapes, name, ctx): + num_sparse = 
_execute.make_int(num_sparse, "num_sparse") + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_example_v2' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_value_types' argument to " + "'parse_example_v2' Op, not %r." % ragged_value_types) + ragged_value_types = [_execute.make_type(_t, "ragged_value_types") for _t in ragged_value_types] + if not isinstance(ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'ragged_split_types' argument to " + "'parse_example_v2' Op, not %r." % ragged_split_types) + ragged_split_types = [_execute.make_type(_t, "ragged_split_types") for _t in ragged_split_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_example_v2' Op, not %r." 
% dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + names = _ops.convert_to_tensor(names, _dtypes.string) + sparse_keys = _ops.convert_to_tensor(sparse_keys, _dtypes.string) + dense_keys = _ops.convert_to_tensor(dense_keys, _dtypes.string) + ragged_keys = _ops.convert_to_tensor(ragged_keys, _dtypes.string) + _inputs_flat = [serialized, names, sparse_keys, dense_keys, ragged_keys] + list(dense_defaults) + _attrs = ("Tdense", _attr_Tdense, "num_sparse", num_sparse, "sparse_types", + sparse_types, "ragged_value_types", ragged_value_types, + "ragged_split_types", ragged_split_types, "dense_shapes", dense_shapes) + _result = _execute.execute(b"ParseExampleV2", num_sparse + len(sparse_types) + + num_sparse + len(dense_defaults) + + len(ragged_value_types) + + len(ragged_split_types), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseExampleV2", _inputs_flat, _attrs, _result) + _result = [_result[:num_sparse]] + _result[num_sparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + num_sparse]] + _result[2 + num_sparse:] + _result = _result[:3] + [_result[3:3 + len(dense_defaults)]] + _result[3 + len(dense_defaults):] + _result = _result[:4] + [_result[4:4 + len(ragged_value_types)]] + _result[4 + len(ragged_value_types):] + _result = _result[:5] + [_result[5:]] + _result = _ParseExampleV2Output._make(_result) + return _result + +_ParseSequenceExampleOutput = collections.namedtuple( + "ParseSequenceExample", + ["context_sparse_indices", "context_sparse_values", "context_sparse_shapes", "context_dense_values", "feature_list_sparse_indices", "feature_list_sparse_values", "feature_list_sparse_shapes", 
"feature_list_dense_values", "feature_list_dense_lengths"]) + + +def parse_sequence_example(serialized: Annotated[Any, _atypes.String], debug_name: Annotated[Any, _atypes.String], context_dense_defaults, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, Ncontext_sparse:int=0, Ncontext_dense:int=0, Nfeature_list_sparse:int=0, Nfeature_list_dense:int=0, context_sparse_types=[], feature_list_dense_types=[], context_dense_shapes=[], feature_list_sparse_types=[], feature_list_dense_shapes=[], name=None): + r"""Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A vector containing binary serialized SequenceExample protos. + debug_name: A `Tensor` of type `string`. + A vector containing the names of the serialized protos. + May contain, for example, table key (descriptive) name for the + corresponding serialized proto. This is purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty vector if no name is available. + context_dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Ncontext_dense Tensors (some may be empty). + context_dense_defaults[j] provides default values + when the SequenceExample's context map lacks context_dense_key[j]. + If an empty Tensor is provided for context_dense_defaults[j], + then the Feature context_dense_keys[j] is required. + The input type is inferred from context_dense_defaults[j], even when it's + empty. If context_dense_defaults[j] is not empty, its shape must match + context_dense_shapes[j]. + feature_list_dense_missing_assumed_empty: A list of `strings`. + A vector listing the + FeatureList keys which may be missing from the SequenceExamples. If the + associated FeatureList is missing, it is treated as empty. 
By default, + any FeatureList not listed in this vector must exist in the SequenceExamples. + context_sparse_keys: A list of `strings`. + A list of Ncontext_sparse string Tensors (scalars). + The keys expected in the Examples' features associated with context_sparse + values. + context_dense_keys: A list of `strings`. + A list of Ncontext_dense string Tensors (scalars). + The keys expected in the SequenceExamples' context features associated with + dense values. + feature_list_sparse_keys: A list of `strings`. + A list of Nfeature_list_sparse string Tensors + (scalars). The keys expected in the FeatureLists associated with sparse + values. + feature_list_dense_keys: A list of `strings`. + A list of Nfeature_list_dense string Tensors (scalars). + The keys expected in the SequenceExamples' feature_lists associated + with lists of dense values. + Ncontext_sparse: An optional `int` that is `>= 0`. Defaults to `0`. + Ncontext_dense: An optional `int` that is `>= 0`. Defaults to `0`. + Nfeature_list_sparse: An optional `int` that is `>= 0`. Defaults to `0`. + Nfeature_list_dense: An optional `int` that is `>= 0`. Defaults to `0`. + context_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + A list of Ncontext_sparse types; the data types of data in + each context Feature given in context_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + feature_list_dense_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + context_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Ncontext_dense shapes; the shapes of data in + each context Feature given in context_dense_keys. + The number of elements in the Feature corresponding to context_dense_key[j] + must always equal context_dense_shapes[j].NumEntries(). 
+ The shape of context_dense_values[j] will match context_dense_shapes[j]. + feature_list_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + A list of Nfeature_list_sparse types; the data types + of data in each FeatureList given in feature_list_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + feature_list_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Nfeature_list_dense shapes; the shapes of + data in each FeatureList given in feature_list_dense_keys. + The shape of each Feature in the FeatureList corresponding to + feature_list_dense_key[j] must always equal + feature_list_dense_shapes[j].NumEntries(). + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths). + + context_sparse_indices: A list of `Ncontext_sparse` `Tensor` objects with type `int64`. + context_sparse_values: A list of `Tensor` objects of type `context_sparse_types`. + context_sparse_shapes: A list of `Ncontext_sparse` `Tensor` objects with type `int64`. + context_dense_values: A list of `Tensor` objects. Has the same type as `context_dense_defaults`. + feature_list_sparse_indices: A list of `Nfeature_list_sparse` `Tensor` objects with type `int64`. + feature_list_sparse_values: A list of `Tensor` objects of type `feature_list_sparse_types`. + feature_list_sparse_shapes: A list of `Nfeature_list_sparse` `Tensor` objects with type `int64`. + feature_list_dense_values: A list of `Tensor` objects of type `feature_list_dense_types`. + feature_list_dense_lengths: A list of `Nfeature_list_dense` `Tensor` objects with type `int64`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseSequenceExample", name, serialized, debug_name, + context_dense_defaults, "feature_list_dense_missing_assumed_empty", + feature_list_dense_missing_assumed_empty, "context_sparse_keys", + context_sparse_keys, "context_dense_keys", context_dense_keys, + "feature_list_sparse_keys", feature_list_sparse_keys, + "feature_list_dense_keys", feature_list_dense_keys, "Ncontext_sparse", + Ncontext_sparse, "Ncontext_dense", Ncontext_dense, + "Nfeature_list_sparse", Nfeature_list_sparse, "Nfeature_list_dense", + Nfeature_list_dense, "context_sparse_types", context_sparse_types, + "feature_list_dense_types", feature_list_dense_types, + "context_dense_shapes", context_dense_shapes, + "feature_list_sparse_types", feature_list_sparse_types, + "feature_list_dense_shapes", feature_list_dense_shapes) + _result = _ParseSequenceExampleOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_sequence_example_eager_fallback( + serialized, debug_name, context_dense_defaults, + feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, + context_sparse_keys=context_sparse_keys, + context_dense_keys=context_dense_keys, + feature_list_sparse_keys=feature_list_sparse_keys, + feature_list_dense_keys=feature_list_dense_keys, + Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, + Nfeature_list_sparse=Nfeature_list_sparse, + Nfeature_list_dense=Nfeature_list_dense, + context_sparse_types=context_sparse_types, + feature_list_dense_types=feature_list_dense_types, + context_dense_shapes=context_dense_shapes, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_dense_shapes=feature_list_dense_shapes, name=name, + ctx=_ctx) + except 
_core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(feature_list_dense_missing_assumed_empty, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_missing_assumed_empty' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_dense_missing_assumed_empty) + feature_list_dense_missing_assumed_empty = [_execute.make_str(_s, "feature_list_dense_missing_assumed_empty") for _s in feature_list_dense_missing_assumed_empty] + if not isinstance(context_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_keys' argument to " + "'parse_sequence_example' Op, not %r." % context_sparse_keys) + context_sparse_keys = [_execute.make_str(_s, "context_sparse_keys") for _s in context_sparse_keys] + if not isinstance(context_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_keys' argument to " + "'parse_sequence_example' Op, not %r." % context_dense_keys) + context_dense_keys = [_execute.make_str(_s, "context_dense_keys") for _s in context_dense_keys] + if not isinstance(feature_list_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_keys' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_sparse_keys) + feature_list_sparse_keys = [_execute.make_str(_s, "feature_list_sparse_keys") for _s in feature_list_sparse_keys] + if not isinstance(feature_list_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_keys' argument to " + "'parse_sequence_example' Op, not %r." 
% feature_list_dense_keys) + feature_list_dense_keys = [_execute.make_str(_s, "feature_list_dense_keys") for _s in feature_list_dense_keys] + if Ncontext_sparse is None: + Ncontext_sparse = 0 + Ncontext_sparse = _execute.make_int(Ncontext_sparse, "Ncontext_sparse") + if Ncontext_dense is None: + Ncontext_dense = 0 + Ncontext_dense = _execute.make_int(Ncontext_dense, "Ncontext_dense") + if Nfeature_list_sparse is None: + Nfeature_list_sparse = 0 + Nfeature_list_sparse = _execute.make_int(Nfeature_list_sparse, "Nfeature_list_sparse") + if Nfeature_list_dense is None: + Nfeature_list_dense = 0 + Nfeature_list_dense = _execute.make_int(Nfeature_list_dense, "Nfeature_list_dense") + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_sequence_example' Op, not %r." % context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_sequence_example' Op, not %r." 
% context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseSequenceExample", serialized=serialized, debug_name=debug_name, + context_dense_defaults=context_dense_defaults, + feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, + context_sparse_keys=context_sparse_keys, + context_dense_keys=context_dense_keys, + feature_list_sparse_keys=feature_list_sparse_keys, + feature_list_dense_keys=feature_list_dense_keys, + Ncontext_sparse=Ncontext_sparse, + Ncontext_dense=Ncontext_dense, + Nfeature_list_sparse=Nfeature_list_sparse, + Nfeature_list_dense=Nfeature_list_dense, + context_sparse_types=context_sparse_types, + feature_list_dense_types=feature_list_dense_types, + context_dense_shapes=context_dense_shapes, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_dense_shapes=feature_list_dense_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("feature_list_dense_missing_assumed_empty", + _op.get_attr("feature_list_dense_missing_assumed_empty"), 
+ "context_sparse_keys", _op.get_attr("context_sparse_keys"), + "context_dense_keys", _op.get_attr("context_dense_keys"), + "feature_list_sparse_keys", + _op.get_attr("feature_list_sparse_keys"), + "feature_list_dense_keys", + _op.get_attr("feature_list_dense_keys"), "Ncontext_sparse", + _op._get_attr_int("Ncontext_sparse"), "Ncontext_dense", + _op._get_attr_int("Ncontext_dense"), "Nfeature_list_sparse", + _op._get_attr_int("Nfeature_list_sparse"), + "Nfeature_list_dense", _op._get_attr_int("Nfeature_list_dense"), + "context_sparse_types", _op.get_attr("context_sparse_types"), + "Tcontext_dense", _op.get_attr("Tcontext_dense"), + "feature_list_dense_types", + _op.get_attr("feature_list_dense_types"), + "context_dense_shapes", _op.get_attr("context_dense_shapes"), + "feature_list_sparse_types", + _op.get_attr("feature_list_sparse_types"), + "feature_list_dense_shapes", + _op.get_attr("feature_list_dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseSequenceExample", _inputs_flat, _attrs, _result) + _result = [_result[:Ncontext_sparse]] + _result[Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + Ncontext_sparse]] + _result[2 + Ncontext_sparse:] + _result = _result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + Nfeature_list_sparse]] + _result[4 + Nfeature_list_sparse:] + _result = _result[:5] + [_result[5:5 + len(feature_list_sparse_types)]] + _result[5 + len(feature_list_sparse_types):] + _result = _result[:6] + [_result[6:6 + Nfeature_list_sparse]] + _result[6 + Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:7 + len(feature_list_dense_types)]] + _result[7 + len(feature_list_dense_types):] + _result = _result[:8] + [_result[8:]] + _result = _ParseSequenceExampleOutput._make(_result) + return _result + 
+ParseSequenceExample = tf_export("raw_ops.ParseSequenceExample")(_ops.to_raw_op(parse_sequence_example)) + + +def parse_sequence_example_eager_fallback(serialized: Annotated[Any, _atypes.String], debug_name: Annotated[Any, _atypes.String], context_dense_defaults, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, Ncontext_sparse: int, Ncontext_dense: int, Nfeature_list_sparse: int, Nfeature_list_dense: int, context_sparse_types, feature_list_dense_types, context_dense_shapes, feature_list_sparse_types, feature_list_dense_shapes, name, ctx): + if not isinstance(feature_list_dense_missing_assumed_empty, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_missing_assumed_empty' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_dense_missing_assumed_empty) + feature_list_dense_missing_assumed_empty = [_execute.make_str(_s, "feature_list_dense_missing_assumed_empty") for _s in feature_list_dense_missing_assumed_empty] + if not isinstance(context_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_keys' argument to " + "'parse_sequence_example' Op, not %r." % context_sparse_keys) + context_sparse_keys = [_execute.make_str(_s, "context_sparse_keys") for _s in context_sparse_keys] + if not isinstance(context_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_keys' argument to " + "'parse_sequence_example' Op, not %r." % context_dense_keys) + context_dense_keys = [_execute.make_str(_s, "context_dense_keys") for _s in context_dense_keys] + if not isinstance(feature_list_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_keys' argument to " + "'parse_sequence_example' Op, not %r." 
% feature_list_sparse_keys) + feature_list_sparse_keys = [_execute.make_str(_s, "feature_list_sparse_keys") for _s in feature_list_sparse_keys] + if not isinstance(feature_list_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_keys' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_dense_keys) + feature_list_dense_keys = [_execute.make_str(_s, "feature_list_dense_keys") for _s in feature_list_dense_keys] + if Ncontext_sparse is None: + Ncontext_sparse = 0 + Ncontext_sparse = _execute.make_int(Ncontext_sparse, "Ncontext_sparse") + if Ncontext_dense is None: + Ncontext_dense = 0 + Ncontext_dense = _execute.make_int(Ncontext_dense, "Ncontext_dense") + if Nfeature_list_sparse is None: + Nfeature_list_sparse = 0 + Nfeature_list_sparse = _execute.make_int(Nfeature_list_sparse, "Nfeature_list_sparse") + if Nfeature_list_dense is None: + Nfeature_list_dense = 0 + Nfeature_list_dense = _execute.make_int(Nfeature_list_dense, "Nfeature_list_dense") + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_sequence_example' Op, not %r." % context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_sequence_example' Op, not %r." 
% feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_sequence_example' Op, not %r." % context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_sequence_example' Op, not %r." % feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_sequence_example' Op, not %r." 
% feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _attr_Tcontext_dense, context_dense_defaults = _execute.convert_to_mixed_eager_tensors(context_dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + debug_name = _ops.convert_to_tensor(debug_name, _dtypes.string) + _inputs_flat = [serialized, debug_name] + list(context_dense_defaults) + _attrs = ("feature_list_dense_missing_assumed_empty", + feature_list_dense_missing_assumed_empty, "context_sparse_keys", + context_sparse_keys, "context_dense_keys", context_dense_keys, + "feature_list_sparse_keys", feature_list_sparse_keys, + "feature_list_dense_keys", feature_list_dense_keys, "Ncontext_sparse", + Ncontext_sparse, "Ncontext_dense", Ncontext_dense, "Nfeature_list_sparse", + Nfeature_list_sparse, "Nfeature_list_dense", Nfeature_list_dense, + "context_sparse_types", context_sparse_types, "Tcontext_dense", + _attr_Tcontext_dense, "feature_list_dense_types", feature_list_dense_types, + "context_dense_shapes", context_dense_shapes, "feature_list_sparse_types", + feature_list_sparse_types, "feature_list_dense_shapes", + feature_list_dense_shapes) + _result = _execute.execute(b"ParseSequenceExample", Ncontext_sparse + + len(context_sparse_types) + Ncontext_sparse + + len(context_dense_defaults) + + Nfeature_list_sparse + + len(feature_list_sparse_types) + + Nfeature_list_sparse + + len(feature_list_dense_types) + + Nfeature_list_dense, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseSequenceExample", _inputs_flat, _attrs, _result) + _result = [_result[:Ncontext_sparse]] + _result[Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + Ncontext_sparse]] + _result[2 + Ncontext_sparse:] + _result = 
_result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + Nfeature_list_sparse]] + _result[4 + Nfeature_list_sparse:] + _result = _result[:5] + [_result[5:5 + len(feature_list_sparse_types)]] + _result[5 + len(feature_list_sparse_types):] + _result = _result[:6] + [_result[6:6 + Nfeature_list_sparse]] + _result[6 + Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:7 + len(feature_list_dense_types)]] + _result[7 + len(feature_list_dense_types):] + _result = _result[:8] + [_result[8:]] + _result = _ParseSequenceExampleOutput._make(_result) + return _result + +_ParseSequenceExampleV2Output = collections.namedtuple( + "ParseSequenceExampleV2", + ["context_sparse_indices", "context_sparse_values", "context_sparse_shapes", "context_dense_values", "context_ragged_values", "context_ragged_row_splits", "feature_list_sparse_indices", "feature_list_sparse_values", "feature_list_sparse_shapes", "feature_list_dense_values", "feature_list_dense_lengths", "feature_list_ragged_values", "feature_list_ragged_outer_splits", "feature_list_ragged_inner_splits"]) + + +def parse_sequence_example_v2(serialized: Annotated[Any, _atypes.String], debug_name: Annotated[Any, _atypes.String], context_sparse_keys: Annotated[Any, _atypes.String], context_dense_keys: Annotated[Any, _atypes.String], context_ragged_keys: Annotated[Any, _atypes.String], feature_list_sparse_keys: Annotated[Any, _atypes.String], feature_list_dense_keys: Annotated[Any, _atypes.String], feature_list_ragged_keys: Annotated[Any, _atypes.String], feature_list_dense_missing_assumed_empty: Annotated[Any, _atypes.Bool], context_dense_defaults, Ncontext_sparse:int=0, context_sparse_types=[], context_ragged_value_types=[], context_ragged_split_types=[], context_dense_shapes=[], Nfeature_list_sparse:int=0, Nfeature_list_dense:int=0, feature_list_dense_types=[], feature_list_sparse_types=[], feature_list_ragged_value_types=[], 
feature_list_ragged_split_types=[], feature_list_dense_shapes=[], name=None): + r"""Transforms a vector of tf.io.SequenceExample protos (as strings) into +typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A scalar or vector containing binary serialized SequenceExample protos. + debug_name: A `Tensor` of type `string`. + A scalar or vector containing the names of the serialized protos. + May contain, for example, table key (descriptive) name for the + corresponding serialized proto. This is purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty vector if no name is available. + context_sparse_keys: A `Tensor` of type `string`. + The keys expected in the Examples' features associated with context_sparse + values. + context_dense_keys: A `Tensor` of type `string`. + The keys expected in the SequenceExamples' context features associated with + dense values. + context_ragged_keys: A `Tensor` of type `string`. + The keys expected in the Examples' features associated with context_ragged + values. + feature_list_sparse_keys: A `Tensor` of type `string`. + The keys expected in the FeatureLists associated with sparse values. + feature_list_dense_keys: A `Tensor` of type `string`. + The keys expected in the SequenceExamples' feature_lists associated + with lists of dense values. + feature_list_ragged_keys: A `Tensor` of type `string`. + The keys expected in the FeatureLists associated with ragged values. + feature_list_dense_missing_assumed_empty: A `Tensor` of type `bool`. + A vector corresponding 1:1 with feature_list_dense_keys, indicating which + features may be missing from the SequenceExamples. If the associated + FeatureList is missing, it is treated as empty. + context_dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Ncontext_dense Tensors (some may be empty). 
+ context_dense_defaults[j] provides default values + when the SequenceExample's context map lacks context_dense_key[j]. + If an empty Tensor is provided for context_dense_defaults[j], + then the Feature context_dense_keys[j] is required. + The input type is inferred from context_dense_defaults[j], even when it's + empty. If context_dense_defaults[j] is not empty, its shape must match + context_dense_shapes[j]. + Ncontext_sparse: An optional `int` that is `>= 0`. Defaults to `0`. + context_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + A list of Ncontext_sparse types; the data types of data in + each context Feature given in context_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + context_ragged_value_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + RaggedTensor.value dtypes for the ragged context features. + context_ragged_split_types: An optional list of `tf.DTypes` from: `tf.int32, tf.int64`. Defaults to `[]`. + RaggedTensor.row_split dtypes for the ragged context features. + context_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Ncontext_dense shapes; the shapes of data in + each context Feature given in context_dense_keys. + The number of elements in the Feature corresponding to context_dense_key[j] + must always equal context_dense_shapes[j].NumEntries(). + The shape of context_dense_values[j] will match context_dense_shapes[j]. + Nfeature_list_sparse: An optional `int` that is `>= 0`. Defaults to `0`. + Nfeature_list_dense: An optional `int` that is `>= 0`. Defaults to `0`. + feature_list_dense_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + feature_list_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. 
Defaults to `[]`. + A list of Nfeature_list_sparse types; the data types + of data in each FeatureList given in feature_list_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + feature_list_ragged_value_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + RaggedTensor.value dtypes for the ragged FeatureList features. + feature_list_ragged_split_types: An optional list of `tf.DTypes` from: `tf.int32, tf.int64`. Defaults to `[]`. + RaggedTensor.row_split dtypes for the ragged FeatureList features. + feature_list_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Nfeature_list_dense shapes; the shapes of + data in each FeatureList given in feature_list_dense_keys. + The shape of each Feature in the FeatureList corresponding to + feature_list_dense_key[j] must always equal + feature_list_dense_shapes[j].NumEntries(). + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits). + + context_sparse_indices: A list of `Ncontext_sparse` `Tensor` objects with type `int64`. + context_sparse_values: A list of `Tensor` objects of type `context_sparse_types`. + context_sparse_shapes: A list of `Ncontext_sparse` `Tensor` objects with type `int64`. + context_dense_values: A list of `Tensor` objects. Has the same type as `context_dense_defaults`. + context_ragged_values: A list of `Tensor` objects of type `context_ragged_value_types`. 
+ context_ragged_row_splits: A list of `Tensor` objects of type `context_ragged_split_types`. + feature_list_sparse_indices: A list of `Nfeature_list_sparse` `Tensor` objects with type `int64`. + feature_list_sparse_values: A list of `Tensor` objects of type `feature_list_sparse_types`. + feature_list_sparse_shapes: A list of `Nfeature_list_sparse` `Tensor` objects with type `int64`. + feature_list_dense_values: A list of `Tensor` objects of type `feature_list_dense_types`. + feature_list_dense_lengths: A list of `Nfeature_list_dense` `Tensor` objects with type `int64`. + feature_list_ragged_values: A list of `Tensor` objects of type `feature_list_ragged_value_types`. + feature_list_ragged_outer_splits: A list of `Tensor` objects of type `feature_list_ragged_split_types`. + feature_list_ragged_inner_splits: A list of `Tensor` objects of type `feature_list_ragged_split_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseSequenceExampleV2", name, serialized, debug_name, + context_sparse_keys, context_dense_keys, context_ragged_keys, + feature_list_sparse_keys, feature_list_dense_keys, + feature_list_ragged_keys, feature_list_dense_missing_assumed_empty, + context_dense_defaults, "Ncontext_sparse", Ncontext_sparse, + "context_sparse_types", context_sparse_types, + "context_ragged_value_types", context_ragged_value_types, + "context_ragged_split_types", context_ragged_split_types, + "context_dense_shapes", context_dense_shapes, "Nfeature_list_sparse", + Nfeature_list_sparse, "Nfeature_list_dense", Nfeature_list_dense, + "feature_list_dense_types", feature_list_dense_types, + "feature_list_sparse_types", feature_list_sparse_types, + "feature_list_ragged_value_types", feature_list_ragged_value_types, + "feature_list_ragged_split_types", feature_list_ragged_split_types, + "feature_list_dense_shapes", feature_list_dense_shapes) + _result = 
_ParseSequenceExampleV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_sequence_example_v2_eager_fallback( + serialized, debug_name, context_sparse_keys, context_dense_keys, + context_ragged_keys, feature_list_sparse_keys, + feature_list_dense_keys, feature_list_ragged_keys, + feature_list_dense_missing_assumed_empty, context_dense_defaults, + Ncontext_sparse=Ncontext_sparse, + context_sparse_types=context_sparse_types, + context_ragged_value_types=context_ragged_value_types, + context_ragged_split_types=context_ragged_split_types, + context_dense_shapes=context_dense_shapes, + Nfeature_list_sparse=Nfeature_list_sparse, + Nfeature_list_dense=Nfeature_list_dense, + feature_list_dense_types=feature_list_dense_types, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_ragged_value_types=feature_list_ragged_value_types, + feature_list_ragged_split_types=feature_list_ragged_split_types, + feature_list_dense_shapes=feature_list_dense_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if Ncontext_sparse is None: + Ncontext_sparse = 0 + Ncontext_sparse = _execute.make_int(Ncontext_sparse, "Ncontext_sparse") + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if context_ragged_value_types is None: + context_ragged_value_types = [] + if not isinstance(context_ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_ragged_value_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_ragged_value_types) + context_ragged_value_types = [_execute.make_type(_t, "context_ragged_value_types") for _t in context_ragged_value_types] + if context_ragged_split_types is None: + context_ragged_split_types = [] + if not isinstance(context_ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_ragged_split_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_ragged_split_types) + context_ragged_split_types = [_execute.make_type(_t, "context_ragged_split_types") for _t in context_ragged_split_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if Nfeature_list_sparse is None: + Nfeature_list_sparse = 0 + Nfeature_list_sparse = _execute.make_int(Nfeature_list_sparse, "Nfeature_list_sparse") + if Nfeature_list_dense is None: + Nfeature_list_dense = 0 + Nfeature_list_dense = _execute.make_int(Nfeature_list_dense, "Nfeature_list_dense") + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_ragged_value_types is None: + feature_list_ragged_value_types = [] + if not isinstance(feature_list_ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_ragged_value_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_ragged_value_types) + feature_list_ragged_value_types = [_execute.make_type(_t, "feature_list_ragged_value_types") for _t in feature_list_ragged_value_types] + if feature_list_ragged_split_types is None: + feature_list_ragged_split_types = [] + if not isinstance(feature_list_ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_ragged_split_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_ragged_split_types) + feature_list_ragged_split_types = [_execute.make_type(_t, "feature_list_ragged_split_types") for _t in feature_list_ragged_split_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseSequenceExampleV2", serialized=serialized, + debug_name=debug_name, + context_sparse_keys=context_sparse_keys, + context_dense_keys=context_dense_keys, + context_ragged_keys=context_ragged_keys, + feature_list_sparse_keys=feature_list_sparse_keys, + feature_list_dense_keys=feature_list_dense_keys, + feature_list_ragged_keys=feature_list_ragged_keys, + feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, + context_dense_defaults=context_dense_defaults, + Ncontext_sparse=Ncontext_sparse, + context_sparse_types=context_sparse_types, + context_ragged_value_types=context_ragged_value_types, + context_ragged_split_types=context_ragged_split_types, + context_dense_shapes=context_dense_shapes, + Nfeature_list_sparse=Nfeature_list_sparse, + Nfeature_list_dense=Nfeature_list_dense, + feature_list_dense_types=feature_list_dense_types, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_ragged_value_types=feature_list_ragged_value_types, + feature_list_ragged_split_types=feature_list_ragged_split_types, + feature_list_dense_shapes=feature_list_dense_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Ncontext_sparse", _op._get_attr_int("Ncontext_sparse"), + "Tcontext_dense", _op.get_attr("Tcontext_dense"), + "context_sparse_types", _op.get_attr("context_sparse_types"), + "context_ragged_value_types", + _op.get_attr("context_ragged_value_types"), + "context_ragged_split_types", + _op.get_attr("context_ragged_split_types"), + "context_dense_shapes", _op.get_attr("context_dense_shapes"), + "Nfeature_list_sparse", + _op._get_attr_int("Nfeature_list_sparse"), + "Nfeature_list_dense", _op._get_attr_int("Nfeature_list_dense"), + "feature_list_dense_types", + 
_op.get_attr("feature_list_dense_types"), + "feature_list_sparse_types", + _op.get_attr("feature_list_sparse_types"), + "feature_list_ragged_value_types", + _op.get_attr("feature_list_ragged_value_types"), + "feature_list_ragged_split_types", + _op.get_attr("feature_list_ragged_split_types"), + "feature_list_dense_shapes", + _op.get_attr("feature_list_dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseSequenceExampleV2", _inputs_flat, _attrs, _result) + _result = [_result[:Ncontext_sparse]] + _result[Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + Ncontext_sparse]] + _result[2 + Ncontext_sparse:] + _result = _result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + len(context_ragged_value_types)]] + _result[4 + len(context_ragged_value_types):] + _result = _result[:5] + [_result[5:5 + len(context_ragged_split_types)]] + _result[5 + len(context_ragged_split_types):] + _result = _result[:6] + [_result[6:6 + Nfeature_list_sparse]] + _result[6 + Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:7 + len(feature_list_sparse_types)]] + _result[7 + len(feature_list_sparse_types):] + _result = _result[:8] + [_result[8:8 + Nfeature_list_sparse]] + _result[8 + Nfeature_list_sparse:] + _result = _result[:9] + [_result[9:9 + len(feature_list_dense_types)]] + _result[9 + len(feature_list_dense_types):] + _result = _result[:10] + [_result[10:10 + Nfeature_list_dense]] + _result[10 + Nfeature_list_dense:] + _result = _result[:11] + [_result[11:11 + len(feature_list_ragged_value_types)]] + _result[11 + len(feature_list_ragged_value_types):] + _result = _result[:12] + [_result[12:12 + len(feature_list_ragged_split_types)]] + _result[12 + len(feature_list_ragged_split_types):] + _result = _result[:13] + [_result[13:]] + _result = 
_ParseSequenceExampleV2Output._make(_result) + return _result + +ParseSequenceExampleV2 = tf_export("raw_ops.ParseSequenceExampleV2")(_ops.to_raw_op(parse_sequence_example_v2)) + + +def parse_sequence_example_v2_eager_fallback(serialized: Annotated[Any, _atypes.String], debug_name: Annotated[Any, _atypes.String], context_sparse_keys: Annotated[Any, _atypes.String], context_dense_keys: Annotated[Any, _atypes.String], context_ragged_keys: Annotated[Any, _atypes.String], feature_list_sparse_keys: Annotated[Any, _atypes.String], feature_list_dense_keys: Annotated[Any, _atypes.String], feature_list_ragged_keys: Annotated[Any, _atypes.String], feature_list_dense_missing_assumed_empty: Annotated[Any, _atypes.Bool], context_dense_defaults, Ncontext_sparse: int, context_sparse_types, context_ragged_value_types, context_ragged_split_types, context_dense_shapes, Nfeature_list_sparse: int, Nfeature_list_dense: int, feature_list_dense_types, feature_list_sparse_types, feature_list_ragged_value_types, feature_list_ragged_split_types, feature_list_dense_shapes, name, ctx): + if Ncontext_sparse is None: + Ncontext_sparse = 0 + Ncontext_sparse = _execute.make_int(Ncontext_sparse, "Ncontext_sparse") + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if context_ragged_value_types is None: + context_ragged_value_types = [] + if not isinstance(context_ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_ragged_value_types' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% context_ragged_value_types) + context_ragged_value_types = [_execute.make_type(_t, "context_ragged_value_types") for _t in context_ragged_value_types] + if context_ragged_split_types is None: + context_ragged_split_types = [] + if not isinstance(context_ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_ragged_split_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_ragged_split_types) + context_ragged_split_types = [_execute.make_type(_t, "context_ragged_split_types") for _t in context_ragged_split_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_sequence_example_v2' Op, not %r." % context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if Nfeature_list_sparse is None: + Nfeature_list_sparse = 0 + Nfeature_list_sparse = _execute.make_int(Nfeature_list_sparse, "Nfeature_list_sparse") + if Nfeature_list_dense is None: + Nfeature_list_dense = 0 + Nfeature_list_dense = _execute.make_int(Nfeature_list_dense, "Nfeature_list_dense") + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_ragged_value_types is None: + feature_list_ragged_value_types = [] + if not isinstance(feature_list_ragged_value_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_ragged_value_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_ragged_value_types) + feature_list_ragged_value_types = [_execute.make_type(_t, "feature_list_ragged_value_types") for _t in feature_list_ragged_value_types] + if feature_list_ragged_split_types is None: + feature_list_ragged_split_types = [] + if not isinstance(feature_list_ragged_split_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_ragged_split_types' argument to " + "'parse_sequence_example_v2' Op, not %r." % feature_list_ragged_split_types) + feature_list_ragged_split_types = [_execute.make_type(_t, "feature_list_ragged_split_types") for _t in feature_list_ragged_split_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_sequence_example_v2' Op, not %r." 
% feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _attr_Tcontext_dense, context_dense_defaults = _execute.convert_to_mixed_eager_tensors(context_dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + debug_name = _ops.convert_to_tensor(debug_name, _dtypes.string) + context_sparse_keys = _ops.convert_to_tensor(context_sparse_keys, _dtypes.string) + context_dense_keys = _ops.convert_to_tensor(context_dense_keys, _dtypes.string) + context_ragged_keys = _ops.convert_to_tensor(context_ragged_keys, _dtypes.string) + feature_list_sparse_keys = _ops.convert_to_tensor(feature_list_sparse_keys, _dtypes.string) + feature_list_dense_keys = _ops.convert_to_tensor(feature_list_dense_keys, _dtypes.string) + feature_list_ragged_keys = _ops.convert_to_tensor(feature_list_ragged_keys, _dtypes.string) + feature_list_dense_missing_assumed_empty = _ops.convert_to_tensor(feature_list_dense_missing_assumed_empty, _dtypes.bool) + _inputs_flat = [serialized, debug_name, context_sparse_keys, context_dense_keys, context_ragged_keys, feature_list_sparse_keys, feature_list_dense_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty] + list(context_dense_defaults) + _attrs = ("Ncontext_sparse", Ncontext_sparse, "Tcontext_dense", + _attr_Tcontext_dense, "context_sparse_types", context_sparse_types, + "context_ragged_value_types", context_ragged_value_types, + "context_ragged_split_types", context_ragged_split_types, + "context_dense_shapes", context_dense_shapes, "Nfeature_list_sparse", + Nfeature_list_sparse, "Nfeature_list_dense", Nfeature_list_dense, + "feature_list_dense_types", feature_list_dense_types, + "feature_list_sparse_types", feature_list_sparse_types, + "feature_list_ragged_value_types", feature_list_ragged_value_types, + "feature_list_ragged_split_types", feature_list_ragged_split_types, + "feature_list_dense_shapes", 
feature_list_dense_shapes) + _result = _execute.execute(b"ParseSequenceExampleV2", Ncontext_sparse + + len(context_sparse_types) + Ncontext_sparse + + len(context_dense_defaults) + + len(context_ragged_value_types) + + len(context_ragged_split_types) + + Nfeature_list_sparse + + len(feature_list_sparse_types) + + Nfeature_list_sparse + + len(feature_list_dense_types) + + Nfeature_list_dense + + len(feature_list_ragged_value_types) + + len(feature_list_ragged_split_types) + + len(feature_list_ragged_split_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseSequenceExampleV2", _inputs_flat, _attrs, _result) + _result = [_result[:Ncontext_sparse]] + _result[Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + Ncontext_sparse]] + _result[2 + Ncontext_sparse:] + _result = _result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + len(context_ragged_value_types)]] + _result[4 + len(context_ragged_value_types):] + _result = _result[:5] + [_result[5:5 + len(context_ragged_split_types)]] + _result[5 + len(context_ragged_split_types):] + _result = _result[:6] + [_result[6:6 + Nfeature_list_sparse]] + _result[6 + Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:7 + len(feature_list_sparse_types)]] + _result[7 + len(feature_list_sparse_types):] + _result = _result[:8] + [_result[8:8 + Nfeature_list_sparse]] + _result[8 + Nfeature_list_sparse:] + _result = _result[:9] + [_result[9:9 + len(feature_list_dense_types)]] + _result[9 + len(feature_list_dense_types):] + _result = _result[:10] + [_result[10:10 + Nfeature_list_dense]] + _result[10 + Nfeature_list_dense:] + _result = _result[:11] + [_result[11:11 + len(feature_list_ragged_value_types)]] + _result[11 + 
len(feature_list_ragged_value_types):] + _result = _result[:12] + [_result[12:12 + len(feature_list_ragged_split_types)]] + _result[12 + len(feature_list_ragged_split_types):] + _result = _result[:13] + [_result[13:]] + _result = _ParseSequenceExampleV2Output._make(_result) + return _result + +_ParseSingleExampleOutput = collections.namedtuple( + "ParseSingleExample", + ["sparse_indices", "sparse_values", "sparse_shapes", "dense_values"]) + + +def parse_single_example(serialized: Annotated[Any, _atypes.String], dense_defaults, num_sparse: int, sparse_keys, dense_keys, sparse_types, dense_shapes, name=None): + r"""Transforms a tf.Example proto (as a string) into typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A vector containing a batch of binary serialized Example protos. + dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Tensors (some may be empty), whose length matches + the length of `dense_keys`. dense_defaults[j] provides default values + when the example's feature_map lacks dense_key[j]. If an empty Tensor is + provided for dense_defaults[j], then the Feature dense_keys[j] is required. + The input type is inferred from dense_defaults[j], even when it's empty. + If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + then the shape of dense_defaults[j] must match that of dense_shapes[j]. + If dense_shapes[j] has an undefined major dimension (variable strides dense + feature), dense_defaults[j] must contain a single element: + the padding element. + num_sparse: An `int` that is `>= 0`. + The number of sparse features to be parsed from the example. This + must match the lengths of `sparse_keys` and `sparse_types`. + sparse_keys: A list of `strings`. A list of `num_sparse` strings. + The keys expected in the Examples' features associated with sparse values. + dense_keys: A list of `strings`. + The keys expected in the Examples' features associated with dense + values. 
+ sparse_types: A list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. + A list of `num_sparse` types; the data types of data in each + Feature given in sparse_keys. + Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + dense_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of data in each Feature given in dense_keys. + The length of this list must match the length of `dense_keys`. The + number of elements in the Feature corresponding to dense_key[j] must + always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == + (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + ..., DN), the shape of the output Tensor dense_values[j] will be (M, + D1, .., DN), where M is the number of blocks of elements of length + D1 * .... * DN, in the input. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shapes, dense_values). + + sparse_indices: A list of `num_sparse` `Tensor` objects with type `int64`. + sparse_values: A list of `Tensor` objects of type `sparse_types`. + sparse_shapes: A list of `num_sparse` `Tensor` objects with type `int64`. + dense_values: A list of `Tensor` objects. Has the same type as `dense_defaults`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseSingleExample", name, serialized, dense_defaults, + "num_sparse", num_sparse, "sparse_keys", sparse_keys, "dense_keys", + dense_keys, "sparse_types", sparse_types, "dense_shapes", + dense_shapes) + _result = _ParseSingleExampleOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_single_example_eager_fallback( + serialized, dense_defaults, num_sparse=num_sparse, + sparse_keys=sparse_keys, dense_keys=dense_keys, + sparse_types=sparse_types, dense_shapes=dense_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_sparse = _execute.make_int(num_sparse, "num_sparse") + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_single_example' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_single_example' Op, not %r." % dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_single_example' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_single_example' Op, not %r." 
% dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseSingleExample", serialized=serialized, + dense_defaults=dense_defaults, + num_sparse=num_sparse, sparse_keys=sparse_keys, + dense_keys=dense_keys, + sparse_types=sparse_types, + dense_shapes=dense_shapes, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_sparse", _op._get_attr_int("num_sparse"), "sparse_keys", + _op.get_attr("sparse_keys"), "dense_keys", + _op.get_attr("dense_keys"), "sparse_types", + _op.get_attr("sparse_types"), "Tdense", _op.get_attr("Tdense"), + "dense_shapes", _op.get_attr("dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseSingleExample", _inputs_flat, _attrs, _result) + _result = [_result[:num_sparse]] + _result[num_sparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + num_sparse]] + _result[2 + num_sparse:] + _result = _result[:3] + [_result[3:]] + _result = _ParseSingleExampleOutput._make(_result) + return _result + +ParseSingleExample = tf_export("raw_ops.ParseSingleExample")(_ops.to_raw_op(parse_single_example)) + + +def parse_single_example_eager_fallback(serialized: Annotated[Any, _atypes.String], dense_defaults, num_sparse: int, sparse_keys, dense_keys, sparse_types, dense_shapes, name, ctx): + num_sparse = _execute.make_int(num_sparse, "num_sparse") + if not isinstance(sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_keys' argument to " + "'parse_single_example' Op, not %r." % sparse_keys) + sparse_keys = [_execute.make_str(_s, "sparse_keys") for _s in sparse_keys] + if not isinstance(dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'dense_keys' argument to " + "'parse_single_example' Op, not %r." 
% dense_keys) + dense_keys = [_execute.make_str(_s, "dense_keys") for _s in dense_keys] + if not isinstance(sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'sparse_types' argument to " + "'parse_single_example' Op, not %r." % sparse_types) + sparse_types = [_execute.make_type(_t, "sparse_types") for _t in sparse_types] + if not isinstance(dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'dense_shapes' argument to " + "'parse_single_example' Op, not %r." % dense_shapes) + dense_shapes = [_execute.make_shape(_s, "dense_shapes") for _s in dense_shapes] + _attr_Tdense, dense_defaults = _execute.convert_to_mixed_eager_tensors(dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + _inputs_flat = [serialized] + list(dense_defaults) + _attrs = ("num_sparse", num_sparse, "sparse_keys", sparse_keys, + "dense_keys", dense_keys, "sparse_types", sparse_types, "Tdense", + _attr_Tdense, "dense_shapes", dense_shapes) + _result = _execute.execute(b"ParseSingleExample", num_sparse + + len(sparse_types) + num_sparse + + len(dense_defaults), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseSingleExample", _inputs_flat, _attrs, _result) + _result = [_result[:num_sparse]] + _result[num_sparse:] + _result = _result[:1] + [_result[1:1 + len(sparse_types)]] + _result[1 + len(sparse_types):] + _result = _result[:2] + [_result[2:2 + num_sparse]] + _result[2 + num_sparse:] + _result = _result[:3] + [_result[3:]] + _result = _ParseSingleExampleOutput._make(_result) + return _result + +_ParseSingleSequenceExampleOutput = collections.namedtuple( + "ParseSingleSequenceExample", + ["context_sparse_indices", "context_sparse_values", "context_sparse_shapes", "context_dense_values", "feature_list_sparse_indices", "feature_list_sparse_values", "feature_list_sparse_shapes", "feature_list_dense_values"]) + + +def 
parse_single_sequence_example(serialized: Annotated[Any, _atypes.String], feature_list_dense_missing_assumed_empty: Annotated[Any, _atypes.String], context_sparse_keys: Annotated[List[Any], _atypes.String], context_dense_keys: Annotated[List[Any], _atypes.String], feature_list_sparse_keys: Annotated[List[Any], _atypes.String], feature_list_dense_keys: Annotated[List[Any], _atypes.String], context_dense_defaults, debug_name: Annotated[Any, _atypes.String], context_sparse_types=[], feature_list_dense_types=[], context_dense_shapes=[], feature_list_sparse_types=[], feature_list_dense_shapes=[], name=None): + r"""Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. + + Args: + serialized: A `Tensor` of type `string`. + A scalar containing a binary serialized SequenceExample proto. + feature_list_dense_missing_assumed_empty: A `Tensor` of type `string`. + A vector listing the + FeatureList keys which may be missing from the SequenceExample. If the + associated FeatureList is missing, it is treated as empty. By default, + any FeatureList not listed in this vector must exist in the SequenceExample. + context_sparse_keys: A list of `Tensor` objects with type `string`. + A list of Ncontext_sparse string Tensors (scalars). + The keys expected in the Examples' features associated with context_sparse + values. + context_dense_keys: A list of `Tensor` objects with type `string`. + A list of Ncontext_dense string Tensors (scalars). + The keys expected in the SequenceExamples' context features associated with + dense values. + feature_list_sparse_keys: A list of `Tensor` objects with type `string`. + A list of Nfeature_list_sparse string Tensors + (scalars). The keys expected in the FeatureLists associated with sparse + values. + feature_list_dense_keys: A list of `Tensor` objects with type `string`. + A list of Nfeature_list_dense string Tensors (scalars). 
+ The keys expected in the SequenceExamples' feature_lists associated + with lists of dense values. + context_dense_defaults: A list of `Tensor` objects with types from: `float32`, `int64`, `string`. + A list of Ncontext_dense Tensors (some may be empty). + context_dense_defaults[j] provides default values + when the SequenceExample's context map lacks context_dense_key[j]. + If an empty Tensor is provided for context_dense_defaults[j], + then the Feature context_dense_keys[j] is required. + The input type is inferred from context_dense_defaults[j], even when it's + empty. If context_dense_defaults[j] is not empty, its shape must match + context_dense_shapes[j]. + debug_name: A `Tensor` of type `string`. + A scalar containing the name of the serialized proto. + May contain, for example, table key (descriptive) name for the + corresponding serialized proto. This is purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty scalar if no name is available. + context_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + A list of Ncontext_sparse types; the data types of data in + each context Feature given in context_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + feature_list_dense_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + context_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Ncontext_dense shapes; the shapes of data in + each context Feature given in context_dense_keys. + The number of elements in the Feature corresponding to context_dense_key[j] + must always equal context_dense_shapes[j].NumEntries(). + The shape of context_dense_values[j] will match context_dense_shapes[j]. 
+ feature_list_sparse_types: An optional list of `tf.DTypes` from: `tf.float32, tf.int64, tf.string`. Defaults to `[]`. + A list of Nfeature_list_sparse types; the data types + of data in each FeatureList given in feature_list_sparse_keys. + Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + DT_INT64 (Int64List), and DT_STRING (BytesList). + feature_list_dense_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. + A list of Nfeature_list_dense shapes; the shapes of + data in each FeatureList given in feature_list_dense_keys. + The shape of each Feature in the FeatureList corresponding to + feature_list_dense_key[j] must always equal + feature_list_dense_shapes[j].NumEntries(). + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values). + + context_sparse_indices: A list with the same length as `context_sparse_keys` of `Tensor` objects with type `int64`. + context_sparse_values: A list of `Tensor` objects of type `context_sparse_types`. + context_sparse_shapes: A list with the same length as `context_sparse_keys` of `Tensor` objects with type `int64`. + context_dense_values: A list of `Tensor` objects. Has the same type as `context_dense_defaults`. + feature_list_sparse_indices: A list with the same length as `feature_list_sparse_keys` of `Tensor` objects with type `int64`. + feature_list_sparse_values: A list of `Tensor` objects of type `feature_list_sparse_types`. + feature_list_sparse_shapes: A list with the same length as `feature_list_sparse_keys` of `Tensor` objects with type `int64`. + feature_list_dense_values: A list of `Tensor` objects of type `feature_list_dense_types`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseSingleSequenceExample", name, serialized, + feature_list_dense_missing_assumed_empty, context_sparse_keys, + context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, + context_dense_defaults, debug_name, "context_sparse_types", + context_sparse_types, "feature_list_dense_types", + feature_list_dense_types, "context_dense_shapes", + context_dense_shapes, "feature_list_sparse_types", + feature_list_sparse_types, "feature_list_dense_shapes", + feature_list_dense_shapes) + _result = _ParseSingleSequenceExampleOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parse_single_sequence_example_eager_fallback( + serialized, feature_list_dense_missing_assumed_empty, + context_sparse_keys, context_dense_keys, feature_list_sparse_keys, + feature_list_dense_keys, context_dense_defaults, debug_name, + context_sparse_types=context_sparse_types, + feature_list_dense_types=feature_list_dense_types, + context_dense_shapes=context_dense_shapes, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_dense_shapes=feature_list_dense_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(context_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % context_sparse_keys) + _attr_Ncontext_sparse = len(context_sparse_keys) + if not isinstance(context_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_keys' argument to " + "'parse_single_sequence_example' Op, not %r." 
% context_dense_keys) + _attr_Ncontext_dense = len(context_dense_keys) + if not isinstance(feature_list_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_sparse_keys) + _attr_Nfeature_list_sparse = len(feature_list_sparse_keys) + if not isinstance(feature_list_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_dense_keys) + _attr_Nfeature_list_dense = len(feature_list_dense_keys) + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_single_sequence_example' Op, not %r." % context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_single_sequence_example' Op, not %r." 
% context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseSingleSequenceExample", serialized=serialized, + feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, + context_sparse_keys=context_sparse_keys, + context_dense_keys=context_dense_keys, + feature_list_sparse_keys=feature_list_sparse_keys, + feature_list_dense_keys=feature_list_dense_keys, + context_dense_defaults=context_dense_defaults, + debug_name=debug_name, + context_sparse_types=context_sparse_types, + feature_list_dense_types=feature_list_dense_types, + context_dense_shapes=context_dense_shapes, + feature_list_sparse_types=feature_list_sparse_types, + feature_list_dense_shapes=feature_list_dense_shapes, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Ncontext_sparse", _op._get_attr_int("Ncontext_sparse"), + "Ncontext_dense", _op._get_attr_int("Ncontext_dense"), + "Nfeature_list_sparse", + _op._get_attr_int("Nfeature_list_sparse"), + "Nfeature_list_dense", 
_op._get_attr_int("Nfeature_list_dense"), + "context_sparse_types", _op.get_attr("context_sparse_types"), + "Tcontext_dense", _op.get_attr("Tcontext_dense"), + "feature_list_dense_types", + _op.get_attr("feature_list_dense_types"), + "context_dense_shapes", _op.get_attr("context_dense_shapes"), + "feature_list_sparse_types", + _op.get_attr("feature_list_sparse_types"), + "feature_list_dense_shapes", + _op.get_attr("feature_list_dense_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseSingleSequenceExample", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_Ncontext_sparse]] + _result[_attr_Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + _attr_Ncontext_sparse]] + _result[2 + _attr_Ncontext_sparse:] + _result = _result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + _attr_Nfeature_list_sparse]] + _result[4 + _attr_Nfeature_list_sparse:] + _result = _result[:5] + [_result[5:5 + len(feature_list_sparse_types)]] + _result[5 + len(feature_list_sparse_types):] + _result = _result[:6] + [_result[6:6 + _attr_Nfeature_list_sparse]] + _result[6 + _attr_Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:]] + _result = _ParseSingleSequenceExampleOutput._make(_result) + return _result + +ParseSingleSequenceExample = tf_export("raw_ops.ParseSingleSequenceExample")(_ops.to_raw_op(parse_single_sequence_example)) + + +def parse_single_sequence_example_eager_fallback(serialized: Annotated[Any, _atypes.String], feature_list_dense_missing_assumed_empty: Annotated[Any, _atypes.String], context_sparse_keys: Annotated[List[Any], _atypes.String], context_dense_keys: Annotated[List[Any], _atypes.String], feature_list_sparse_keys: Annotated[List[Any], _atypes.String], feature_list_dense_keys: Annotated[List[Any], _atypes.String], 
context_dense_defaults, debug_name: Annotated[Any, _atypes.String], context_sparse_types, feature_list_dense_types, context_dense_shapes, feature_list_sparse_types, feature_list_dense_shapes, name, ctx): + if not isinstance(context_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % context_sparse_keys) + _attr_Ncontext_sparse = len(context_sparse_keys) + if not isinstance(context_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % context_dense_keys) + _attr_Ncontext_dense = len(context_dense_keys) + if not isinstance(feature_list_sparse_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_sparse_keys) + _attr_Nfeature_list_sparse = len(feature_list_sparse_keys) + if not isinstance(feature_list_dense_keys, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_keys' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_dense_keys) + _attr_Nfeature_list_dense = len(feature_list_dense_keys) + if context_sparse_types is None: + context_sparse_types = [] + if not isinstance(context_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'context_sparse_types' argument to " + "'parse_single_sequence_example' Op, not %r." % context_sparse_types) + context_sparse_types = [_execute.make_type(_t, "context_sparse_types") for _t in context_sparse_types] + if feature_list_dense_types is None: + feature_list_dense_types = [] + if not isinstance(feature_list_dense_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_types' argument to " + "'parse_single_sequence_example' Op, not %r." 
% feature_list_dense_types) + feature_list_dense_types = [_execute.make_type(_t, "feature_list_dense_types") for _t in feature_list_dense_types] + if context_dense_shapes is None: + context_dense_shapes = [] + if not isinstance(context_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'context_dense_shapes' argument to " + "'parse_single_sequence_example' Op, not %r." % context_dense_shapes) + context_dense_shapes = [_execute.make_shape(_s, "context_dense_shapes") for _s in context_dense_shapes] + if feature_list_sparse_types is None: + feature_list_sparse_types = [] + if not isinstance(feature_list_sparse_types, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_sparse_types' argument to " + "'parse_single_sequence_example' Op, not %r." % feature_list_sparse_types) + feature_list_sparse_types = [_execute.make_type(_t, "feature_list_sparse_types") for _t in feature_list_sparse_types] + if feature_list_dense_shapes is None: + feature_list_dense_shapes = [] + if not isinstance(feature_list_dense_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'feature_list_dense_shapes' argument to " + "'parse_single_sequence_example' Op, not %r." 
% feature_list_dense_shapes) + feature_list_dense_shapes = [_execute.make_shape(_s, "feature_list_dense_shapes") for _s in feature_list_dense_shapes] + _attr_Tcontext_dense, context_dense_defaults = _execute.convert_to_mixed_eager_tensors(context_dense_defaults, ctx) + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + feature_list_dense_missing_assumed_empty = _ops.convert_to_tensor(feature_list_dense_missing_assumed_empty, _dtypes.string) + context_sparse_keys = _ops.convert_n_to_tensor(context_sparse_keys, _dtypes.string) + context_dense_keys = _ops.convert_n_to_tensor(context_dense_keys, _dtypes.string) + feature_list_sparse_keys = _ops.convert_n_to_tensor(feature_list_sparse_keys, _dtypes.string) + feature_list_dense_keys = _ops.convert_n_to_tensor(feature_list_dense_keys, _dtypes.string) + debug_name = _ops.convert_to_tensor(debug_name, _dtypes.string) + _inputs_flat = [serialized, feature_list_dense_missing_assumed_empty] + list(context_sparse_keys) + list(context_dense_keys) + list(feature_list_sparse_keys) + list(feature_list_dense_keys) + list(context_dense_defaults) + [debug_name] + _attrs = ("Ncontext_sparse", _attr_Ncontext_sparse, "Ncontext_dense", + _attr_Ncontext_dense, "Nfeature_list_sparse", _attr_Nfeature_list_sparse, + "Nfeature_list_dense", _attr_Nfeature_list_dense, "context_sparse_types", + context_sparse_types, "Tcontext_dense", _attr_Tcontext_dense, + "feature_list_dense_types", feature_list_dense_types, + "context_dense_shapes", context_dense_shapes, "feature_list_sparse_types", + feature_list_sparse_types, "feature_list_dense_shapes", + feature_list_dense_shapes) + _result = _execute.execute(b"ParseSingleSequenceExample", + _attr_Ncontext_sparse + len(context_sparse_types) + + _attr_Ncontext_sparse + + len(context_dense_defaults) + + _attr_Nfeature_list_sparse + + len(feature_list_sparse_types) + + _attr_Nfeature_list_sparse + + len(feature_list_dense_types), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "ParseSingleSequenceExample", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_Ncontext_sparse]] + _result[_attr_Ncontext_sparse:] + _result = _result[:1] + [_result[1:1 + len(context_sparse_types)]] + _result[1 + len(context_sparse_types):] + _result = _result[:2] + [_result[2:2 + _attr_Ncontext_sparse]] + _result[2 + _attr_Ncontext_sparse:] + _result = _result[:3] + [_result[3:3 + len(context_dense_defaults)]] + _result[3 + len(context_dense_defaults):] + _result = _result[:4] + [_result[4:4 + _attr_Nfeature_list_sparse]] + _result[4 + _attr_Nfeature_list_sparse:] + _result = _result[:5] + [_result[5:5 + len(feature_list_sparse_types)]] + _result[5 + len(feature_list_sparse_types):] + _result = _result[:6] + [_result[6:6 + _attr_Nfeature_list_sparse]] + _result[6 + _attr_Nfeature_list_sparse:] + _result = _result[:7] + [_result[7:]] + _result = _ParseSingleSequenceExampleOutput._make(_result) + return _result + + +TV_ParseTensor_out_type = TypeVar("TV_ParseTensor_out_type", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.parse_tensor', v1=['io.parse_tensor', 'parse_tensor']) +@deprecated_endpoints('parse_tensor') +def parse_tensor(serialized: Annotated[Any, _atypes.String], out_type: TV_ParseTensor_out_type, name=None) -> Annotated[Any, TV_ParseTensor_out_type]: + r"""Transforms a serialized tensorflow.TensorProto proto into a Tensor. + + Args: + serialized: A `Tensor` of type `string`. 
+ A scalar string containing a serialized TensorProto proto. + out_type: A `tf.DType`. + The type of the serialized tensor. The provided type must match the + type of the serialized tensor and no implicit conversion will take place. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `out_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParseTensor", name, serialized, "out_type", out_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_parse_tensor( + (serialized, out_type, name,), None) + if _result is not NotImplemented: + return _result + return parse_tensor_eager_fallback( + serialized, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + parse_tensor, (), dict(serialized=serialized, out_type=out_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_parse_tensor( + (serialized, out_type, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ out_type = _execute.make_type(out_type, "out_type") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParseTensor", serialized=serialized, out_type=out_type, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + parse_tensor, (), dict(serialized=serialized, out_type=out_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("out_type", _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParseTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParseTensor = tf_export("raw_ops.ParseTensor")(_ops.to_raw_op(parse_tensor)) +_dispatcher_for_parse_tensor = parse_tensor._tf_type_based_dispatcher.Dispatch + + +def parse_tensor_eager_fallback(serialized: Annotated[Any, _atypes.String], out_type: TV_ParseTensor_out_type, name, ctx) -> Annotated[Any, TV_ParseTensor_out_type]: + out_type = _execute.make_type(out_type, "out_type") + serialized = _ops.convert_to_tensor(serialized, _dtypes.string) + _inputs_flat = [serialized] + _attrs = ("out_type", out_type) + _result = _execute.execute(b"ParseTensor", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParseTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SerializeTensor_T = TypeVar("TV_SerializeTensor_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def 
serialize_tensor(tensor: Annotated[Any, TV_SerializeTensor_T], name=None) -> Annotated[Any, _atypes.String]: + r"""Transforms a Tensor into a serialized TensorProto proto. + + Args: + tensor: A `Tensor`. A Tensor of type `T`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SerializeTensor", name, tensor) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return serialize_tensor_eager_fallback( + tensor, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SerializeTensor", tensor=tensor, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SerializeTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SerializeTensor = tf_export("raw_ops.SerializeTensor")(_ops.to_raw_op(serialize_tensor)) + + +def serialize_tensor_eager_fallback(tensor: Annotated[Any, TV_SerializeTensor_T], name, ctx) -> Annotated[Any, _atypes.String]: + _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _inputs_flat = [tensor] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SerializeTensor", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SerializeTensor", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StringToNumber_out_type = TypeVar("TV_StringToNumber_out_type", _atypes.Float32, _atypes.Float64, _atypes.Int32, _atypes.Int64) + +def 
string_to_number(string_tensor: Annotated[Any, _atypes.String], out_type:TV_StringToNumber_out_type=_dtypes.float32, name=None) -> Annotated[Any, TV_StringToNumber_out_type]: + r"""Converts each string in the input Tensor to the specified numeric type. + + (Note that int32 overflow results in an error while float overflow + results in a rounded value.) + + Example: + + >>> strings = ["5.0", "3.0", "7.0"] + >>> tf.strings.to_number(strings) + + + Args: + string_tensor: A `Tensor` of type `string`. + out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.float32`. + The numeric type to interpret each string in `string_tensor` as. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `out_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StringToNumber", name, string_tensor, "out_type", out_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return string_to_number_eager_fallback( + string_tensor, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if out_type is None: + out_type = _dtypes.float32 + out_type = _execute.make_type(out_type, "out_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StringToNumber", string_tensor=string_tensor, out_type=out_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("out_type", _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StringToNumber", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StringToNumber = tf_export("raw_ops.StringToNumber")(_ops.to_raw_op(string_to_number)) + + +def string_to_number_eager_fallback(string_tensor: Annotated[Any, _atypes.String], out_type: TV_StringToNumber_out_type, name, ctx) -> Annotated[Any, TV_StringToNumber_out_type]: + if out_type is None: + out_type = _dtypes.float32 + out_type = _execute.make_type(out_type, "out_type") + string_tensor = _ops.convert_to_tensor(string_tensor, _dtypes.string) + _inputs_flat = [string_tensor] + _attrs = ("out_type", out_type) + _result = _execute.execute(b"StringToNumber", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StringToNumber", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_random_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a999eefba38f68fcdff3a6ec041f0b299e2c717a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_random_ops.py @@ -0,0 +1,981 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_Multinomial_T = TypeVar("TV_Multinomial_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_Multinomial_output_dtype = TypeVar("TV_Multinomial_output_dtype", _atypes.Int32, _atypes.Int64) + +def multinomial(logits: Annotated[Any, TV_Multinomial_T], num_samples: Annotated[Any, _atypes.Int32], seed:int=0, seed2:int=0, output_dtype:TV_Multinomial_output_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_Multinomial_output_dtype]: + r"""Draws samples from a multinomial distribution. + + Args: + logits: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + represents the unnormalized log probabilities for all classes. + num_samples: A `Tensor` of type `int32`. + 0-D. Number of independent samples to draw for each row slice. + seed: An optional `int`. Defaults to `0`. 
+ If either seed or seed2 is set to be non-zero, the internal random number + generator is seeded by the given seed. Otherwise, a random seed is used. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `output_dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Multinomial", name, logits, num_samples, "seed", seed, "seed2", + seed2, "output_dtype", output_dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return multinomial_eager_fallback( + logits, num_samples, seed=seed, seed2=seed2, + output_dtype=output_dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if output_dtype is None: + output_dtype = _dtypes.int64 + output_dtype = _execute.make_type(output_dtype, "output_dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Multinomial", logits=logits, num_samples=num_samples, seed=seed, + seed2=seed2, output_dtype=output_dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "T", _op._get_attr_type("T"), + "output_dtype", _op._get_attr_type("output_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Multinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Multinomial = tf_export("raw_ops.Multinomial")(_ops.to_raw_op(multinomial)) + + +def multinomial_eager_fallback(logits: Annotated[Any, TV_Multinomial_T], num_samples: Annotated[Any, _atypes.Int32], seed: int, seed2: int, output_dtype: TV_Multinomial_output_dtype, name, ctx) -> Annotated[Any, TV_Multinomial_output_dtype]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if output_dtype is None: + output_dtype = _dtypes.int64 + output_dtype = _execute.make_type(output_dtype, "output_dtype") + _attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + num_samples = _ops.convert_to_tensor(num_samples, _dtypes.int32) + _inputs_flat = [logits, num_samples] + _attrs = ("seed", seed, "seed2", seed2, "T", _attr_T, "output_dtype", + output_dtype) + _result = _execute.execute(b"Multinomial", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "Multinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ParameterizedTruncatedNormal_dtype = TypeVar("TV_ParameterizedTruncatedNormal_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_ParameterizedTruncatedNormal_T = TypeVar("TV_ParameterizedTruncatedNormal_T", _atypes.Int32, _atypes.Int64) + +def parameterized_truncated_normal(shape: Annotated[Any, TV_ParameterizedTruncatedNormal_T], means: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], stdevs: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], minvals: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], maxvals: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_ParameterizedTruncatedNormal_dtype]: + r"""Outputs random values from a normal distribution. The parameters may each be a + + scalar which applies to the entire output, or a vector of length shape[0] which + stores the parameters for each batch. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. Batches are indexed by the 0th dimension. + means: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. + The mean parameter of each batch. + stdevs: A `Tensor`. Must have the same type as `means`. + The standard deviation parameter of each batch. Must be greater than 0. + minvals: A `Tensor`. Must have the same type as `means`. + The minimum cutoff. May be -infinity. + maxvals: A `Tensor`. Must have the same type as `means`. + The maximum cutoff. May be +infinity, and must be more than the minval + for each batch. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. 
+ A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `means`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ParameterizedTruncatedNormal", name, shape, means, stdevs, + minvals, maxvals, "seed", seed, "seed2", seed2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return parameterized_truncated_normal_eager_fallback( + shape, means, stdevs, minvals, maxvals, seed=seed, seed2=seed2, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ParameterizedTruncatedNormal", shape=shape, means=means, + stdevs=stdevs, minvals=minvals, + maxvals=maxvals, seed=seed, + seed2=seed2, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "dtype", + _op._get_attr_type("dtype"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ParameterizedTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ParameterizedTruncatedNormal = tf_export("raw_ops.ParameterizedTruncatedNormal")(_ops.to_raw_op(parameterized_truncated_normal)) + + +def parameterized_truncated_normal_eager_fallback(shape: Annotated[Any, TV_ParameterizedTruncatedNormal_T], means: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], stdevs: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], minvals: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], 
maxvals: Annotated[Any, TV_ParameterizedTruncatedNormal_dtype], seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_ParameterizedTruncatedNormal_dtype]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([means, stdevs, minvals, maxvals], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) + (means, stdevs, minvals, maxvals) = _inputs_dtype + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [shape, means, stdevs, minvals, maxvals] + _attrs = ("seed", seed, "seed2", seed2, "dtype", _attr_dtype, "T", _attr_T) + _result = _execute.execute(b"ParameterizedTruncatedNormal", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ParameterizedTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomGamma_S = TypeVar("TV_RandomGamma_S", _atypes.Int32, _atypes.Int64) +TV_RandomGamma_T = TypeVar("TV_RandomGamma_T", _atypes.Float32, _atypes.Float64, _atypes.Half) + +def random_gamma(shape: Annotated[Any, TV_RandomGamma_S], alpha: Annotated[Any, TV_RandomGamma_T], seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_RandomGamma_T]: + r"""Outputs random values from the Gamma distribution(s) described by alpha. + + This op uses the algorithm by Marsaglia et al. to acquire samples via + transformation-rejection from pairs of uniform and normal random variables. + See http://dl.acm.org/citation.cfm?id=358414 + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in alpha. + alpha: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. 
+ A tensor in which each scalar is a "shape" parameter describing the + associated gamma distribution. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `alpha`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomGamma", name, shape, alpha, "seed", seed, "seed2", seed2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_gamma_eager_fallback( + shape, alpha, seed=seed, seed2=seed2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomGamma", shape=shape, alpha=alpha, seed=seed, seed2=seed2, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "S", _op._get_attr_type("S"), "T", + _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomGamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomGamma = tf_export("raw_ops.RandomGamma")(_ops.to_raw_op(random_gamma)) + + +def random_gamma_eager_fallback(shape: Annotated[Any, TV_RandomGamma_S], alpha: Annotated[Any, TV_RandomGamma_T], seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomGamma_T]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_T, (alpha,) = _execute.args_to_matching_eager([alpha], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [shape, alpha] + _attrs = ("seed", seed, "seed2", seed2, "S", _attr_S, "T", _attr_T) + _result = _execute.execute(b"RandomGamma", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomGamma", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomGammaGrad_T = TypeVar("TV_RandomGammaGrad_T", _atypes.Float32, _atypes.Float64) + +def random_gamma_grad(alpha: Annotated[Any, TV_RandomGammaGrad_T], sample: Annotated[Any, TV_RandomGammaGrad_T], name=None) -> Annotated[Any, TV_RandomGammaGrad_T]: + r"""Computes the derivative of a Gamma random sample w.r.t. `alpha`. + + Args: + alpha: A `Tensor`. 
Must be one of the following types: `float32`, `float64`. + sample: A `Tensor`. Must have the same type as `alpha`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `alpha`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomGammaGrad", name, alpha, sample) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_gamma_grad_eager_fallback( + alpha, sample, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomGammaGrad", alpha=alpha, sample=sample, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomGammaGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomGammaGrad = tf_export("raw_ops.RandomGammaGrad")(_ops.to_raw_op(random_gamma_grad)) + + +def random_gamma_grad_eager_fallback(alpha: Annotated[Any, TV_RandomGammaGrad_T], sample: Annotated[Any, TV_RandomGammaGrad_T], name, ctx) -> Annotated[Any, TV_RandomGammaGrad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, sample], ctx, [_dtypes.float32, _dtypes.float64, ]) + (alpha, sample) = _inputs_T + _inputs_flat = [alpha, sample] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"RandomGammaGrad", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomGammaGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomPoisson_S = TypeVar("TV_RandomPoisson_S", _atypes.Int32, _atypes.Int64) +TV_RandomPoisson_dtype 
= TypeVar("TV_RandomPoisson_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half) + +def random_poisson(shape: Annotated[Any, TV_RandomPoisson_S], rate: Annotated[Any, TV_RandomPoisson_dtype], seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_RandomPoisson_dtype]: + r"""Use RandomPoissonV2 instead. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + rate: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. + seed: An optional `int`. Defaults to `0`. + seed2: An optional `int`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `rate`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomPoisson", name, shape, rate, "seed", seed, "seed2", + seed2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_poisson_eager_fallback( + shape, rate, seed=seed, seed2=seed2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomPoisson", shape=shape, rate=rate, seed=seed, seed2=seed2, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "S", _op._get_attr_type("S"), + "dtype", _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomPoisson", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomPoisson = tf_export("raw_ops.RandomPoisson")(_ops.to_raw_op(random_poisson)) + + +def random_poisson_eager_fallback(shape: Annotated[Any, TV_RandomPoisson_S], rate: Annotated[Any, TV_RandomPoisson_dtype], seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomPoisson_dtype]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_dtype, (rate,) = _execute.args_to_matching_eager([rate], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) + _inputs_flat = [shape, rate] + _attrs = ("seed", seed, "seed2", seed2, "S", _attr_S, "dtype", _attr_dtype) + _result = _execute.execute(b"RandomPoisson", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomPoisson", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomPoissonV2_S = TypeVar("TV_RandomPoissonV2_S", _atypes.Int32, _atypes.Int64) +TV_RandomPoissonV2_R = TypeVar("TV_RandomPoissonV2_R", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) +TV_RandomPoissonV2_dtype = TypeVar("TV_RandomPoissonV2_dtype", _atypes.Float32, 
_atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def random_poisson_v2(shape: Annotated[Any, TV_RandomPoissonV2_S], rate: Annotated[Any, TV_RandomPoissonV2_R], seed:int=0, seed2:int=0, dtype:TV_RandomPoissonV2_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_RandomPoissonV2_dtype]: + r"""Outputs random values from the Poisson distribution(s) described by rate. + + This op uses two algorithms, depending on rate. If rate >= 10, then + the algorithm by Hormann is used to acquire samples via + transformation-rejection. + See http://www.sciencedirect.com/science/article/pii/0167668793909974. + + Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + random variables. + See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + Programming, Volume 2. Addison Wesley + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in rate. + rate: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`. + A tensor in which each scalar is a "rate" parameter describing the + associated poisson distribution. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomPoissonV2", name, shape, rate, "seed", seed, "seed2", + seed2, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_poisson_v2_eager_fallback( + shape, rate, seed=seed, seed2=seed2, dtype=dtype, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomPoissonV2", shape=shape, rate=rate, seed=seed, seed2=seed2, + dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "S", _op._get_attr_type("S"), "R", + _op._get_attr_type("R"), "dtype", _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomPoissonV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomPoissonV2 = tf_export("raw_ops.RandomPoissonV2")(_ops.to_raw_op(random_poisson_v2)) + + +def random_poisson_v2_eager_fallback(shape: Annotated[Any, TV_RandomPoissonV2_S], rate: Annotated[Any, TV_RandomPoissonV2_R], seed: int, seed2: int, dtype: TV_RandomPoissonV2_dtype, name, ctx) -> Annotated[Any, TV_RandomPoissonV2_dtype]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + 
_attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_R, (rate,) = _execute.args_to_matching_eager([rate], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ], _dtypes.float64) + _inputs_flat = [shape, rate] + _attrs = ("seed", seed, "seed2", seed2, "S", _attr_S, "R", _attr_R, "dtype", + dtype) + _result = _execute.execute(b"RandomPoissonV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomPoissonV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomShuffle_T = TypeVar("TV_RandomShuffle_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def random_shuffle(value: Annotated[Any, TV_RandomShuffle_T], seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_RandomShuffle_T]: + r"""Randomly shuffles a tensor along its first dimension. + + The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + to one and only one `output[i]`. For example, a mapping that might occur for a + 3x2 tensor is: + + ``` + [[1, 2], [[5, 6], + [3, 4], ==> [1, 2], + [5, 6]] [3, 4]] + ``` + + Args: + value: A `Tensor`. The tensor to be shuffled. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `value`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomShuffle", name, value, "seed", seed, "seed2", seed2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_shuffle_eager_fallback( + value, seed=seed, seed2=seed2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomShuffle", value=value, seed=seed, seed2=seed2, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomShuffle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomShuffle = tf_export("raw_ops.RandomShuffle")(_ops.to_raw_op(random_shuffle)) + + +def random_shuffle_eager_fallback(value: Annotated[Any, TV_RandomShuffle_T], seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomShuffle_T]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, []) + _inputs_flat = [value] + _attrs = ("seed", seed, "seed2", seed2, "T", _attr_T) + _result = _execute.execute(b"RandomShuffle", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( 
+ "RandomShuffle", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomStandardNormal_dtype = TypeVar("TV_RandomStandardNormal_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_RandomStandardNormal_T = TypeVar("TV_RandomStandardNormal_T", _atypes.Int32, _atypes.Int64) + +def random_standard_normal(shape: Annotated[Any, TV_RandomStandardNormal_T], dtype: TV_RandomStandardNormal_dtype, seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_RandomStandardNormal_dtype]: + r"""Outputs random values from a normal distribution. + + The generated values will have mean 0 and standard deviation 1. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + dtype: A `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. + The type of the output. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomStandardNormal", name, shape, "seed", seed, "seed2", + seed2, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_standard_normal_eager_fallback( + shape, seed=seed, seed2=seed2, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomStandardNormal", shape=shape, dtype=dtype, seed=seed, + seed2=seed2, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "dtype", + _op._get_attr_type("dtype"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomStandardNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomStandardNormal = tf_export("raw_ops.RandomStandardNormal")(_ops.to_raw_op(random_standard_normal)) + + +def random_standard_normal_eager_fallback(shape: Annotated[Any, TV_RandomStandardNormal_T], dtype: TV_RandomStandardNormal_dtype, seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomStandardNormal_dtype]: + dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [shape] + _attrs = ("seed", seed, "seed2", seed2, "dtype", dtype, "T", _attr_T) + _result = _execute.execute(b"RandomStandardNormal", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomStandardNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomUniform_dtype = TypeVar("TV_RandomUniform_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_RandomUniform_T = TypeVar("TV_RandomUniform_T", _atypes.Int32, _atypes.Int64) + +def random_uniform(shape: Annotated[Any, TV_RandomUniform_T], dtype: 
TV_RandomUniform_dtype, seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_RandomUniform_dtype]: + r"""Outputs random values from a uniform distribution. + + The generated values follow a uniform distribution in the range `[0, 1)`. The + lower bound 0 is included in the range, while the upper bound 1 is excluded. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + dtype: A `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. + The type of the output. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomUniform", name, shape, "seed", seed, "seed2", seed2, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_uniform_eager_fallback( + shape, seed=seed, seed2=seed2, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomUniform", shape=shape, dtype=dtype, seed=seed, seed2=seed2, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "dtype", + _op._get_attr_type("dtype"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomUniform = tf_export("raw_ops.RandomUniform")(_ops.to_raw_op(random_uniform)) + + +def random_uniform_eager_fallback(shape: Annotated[Any, TV_RandomUniform_T], dtype: TV_RandomUniform_dtype, seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomUniform_dtype]: + dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [shape] + _attrs = ("seed", seed, "seed2", seed2, "dtype", dtype, "T", _attr_T) + _result = _execute.execute(b"RandomUniform", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_RandomUniformInt_Tout = TypeVar("TV_RandomUniformInt_Tout", _atypes.Int32, _atypes.Int64) +TV_RandomUniformInt_T = TypeVar("TV_RandomUniformInt_T", _atypes.Int32, _atypes.Int64) + +def random_uniform_int(shape: Annotated[Any, TV_RandomUniformInt_T], minval: Annotated[Any, TV_RandomUniformInt_Tout], maxval: Annotated[Any, TV_RandomUniformInt_Tout], seed:int=0, 
seed2:int=0, name=None) -> Annotated[Any, TV_RandomUniformInt_Tout]: + r"""Outputs random integers from a uniform distribution. + + The generated values are uniform integers in the range `[minval, maxval)`. + The lower bound `minval` is included in the range, while the upper bound + `maxval` is excluded. + + The random integers are slightly biased unless `maxval - minval` is an exact + power of two. The bias is small for values of `maxval - minval` significantly + smaller than the range of the output (either `2^32` or `2^64`). + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + minval: A `Tensor`. Must be one of the following types: `int32`, `int64`. + 0-D. Inclusive lower bound on the generated integers. + maxval: A `Tensor`. Must have the same type as `minval`. + 0-D. Exclusive upper bound on the generated integers. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `minval`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RandomUniformInt", name, shape, minval, maxval, "seed", seed, + "seed2", seed2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return random_uniform_int_eager_fallback( + shape, minval, maxval, seed=seed, seed2=seed2, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RandomUniformInt", shape=shape, minval=minval, maxval=maxval, + seed=seed, seed2=seed2, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "Tout", _op._get_attr_type("Tout"), + "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RandomUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RandomUniformInt = tf_export("raw_ops.RandomUniformInt")(_ops.to_raw_op(random_uniform_int)) + + +def random_uniform_int_eager_fallback(shape: Annotated[Any, TV_RandomUniformInt_T], minval: Annotated[Any, TV_RandomUniformInt_Tout], maxval: Annotated[Any, TV_RandomUniformInt_Tout], seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_RandomUniformInt_Tout]: + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_Tout, _inputs_Tout = _execute.args_to_matching_eager([minval, maxval], ctx, [_dtypes.int32, _dtypes.int64, ]) + (minval, maxval) = _inputs_Tout + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [shape, minval, maxval] + _attrs = ("seed", seed, "seed2", seed2, "Tout", _attr_Tout, "T", _attr_T) + _result = _execute.execute(b"RandomUniformInt", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RandomUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TruncatedNormal_dtype = TypeVar("TV_TruncatedNormal_dtype", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half) +TV_TruncatedNormal_T = 
TypeVar("TV_TruncatedNormal_T", _atypes.Int32, _atypes.Int64) + +def truncated_normal(shape: Annotated[Any, TV_TruncatedNormal_T], dtype: TV_TruncatedNormal_dtype, seed:int=0, seed2:int=0, name=None) -> Annotated[Any, TV_TruncatedNormal_dtype]: + r"""Outputs random values from a truncated normal distribution. + + The generated values follow a normal distribution with mean 0 and standard + deviation 1, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + Args: + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + The shape of the output tensor. + dtype: A `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. + The type of the output. + seed: An optional `int`. Defaults to `0`. + If either `seed` or `seed2` are set to be non-zero, the random number + generator is seeded by the given seed. Otherwise, it is seeded by a + random seed. + seed2: An optional `int`. Defaults to `0`. + A second seed to avoid seed collision. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TruncatedNormal", name, shape, "seed", seed, "seed2", seed2, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return truncated_normal_eager_fallback( + shape, seed=seed, seed2=seed2, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TruncatedNormal", shape=shape, dtype=dtype, seed=seed, seed2=seed2, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("seed", _op._get_attr_int("seed"), "seed2", + _op._get_attr_int("seed2"), "dtype", + _op._get_attr_type("dtype"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TruncatedNormal = tf_export("raw_ops.TruncatedNormal")(_ops.to_raw_op(truncated_normal)) + + +def truncated_normal_eager_fallback(shape: Annotated[Any, TV_TruncatedNormal_T], dtype: TV_TruncatedNormal_dtype, seed: int, seed2: int, name, ctx) -> Annotated[Any, TV_TruncatedNormal_dtype]: + dtype = _execute.make_type(dtype, "dtype") + if seed is None: + seed = 0 + seed = _execute.make_int(seed, "seed") + if seed2 is None: + seed2 = 0 + seed2 = _execute.make_int(seed2, "seed2") + _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [shape] + _attrs = ("seed", seed, "seed2", seed2, "dtype", dtype, "T", _attr_T) + _result = _execute.execute(b"TruncatedNormal", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateful_random_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateful_random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ef5549c2d4d86ab4421fda56ee52bf3833de32 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_stateful_random_ops.py @@ -0,0 +1,748 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_NonDeterministicInts_dtype = TypeVar("TV_NonDeterministicInts_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_NonDeterministicInts_shape_dtype = TypeVar("TV_NonDeterministicInts_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, 
_atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def non_deterministic_ints(shape: Annotated[Any, TV_NonDeterministicInts_shape_dtype], dtype:TV_NonDeterministicInts_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_NonDeterministicInts_dtype]: + r"""Non-deterministically generates some integers. + + This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. + + Args: + shape: A `Tensor`. The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.int64`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "NonDeterministicInts", name, shape, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return non_deterministic_ints_eager_fallback( + shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "NonDeterministicInts", shape=shape, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "NonDeterministicInts", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +NonDeterministicInts = tf_export("raw_ops.NonDeterministicInts")(_ops.to_raw_op(non_deterministic_ints)) + + +def non_deterministic_ints_eager_fallback(shape: Annotated[Any, TV_NonDeterministicInts_shape_dtype], dtype: TV_NonDeterministicInts_dtype, name, ctx) -> Annotated[Any, TV_NonDeterministicInts_dtype]: + if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + _inputs_flat = [shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"NonDeterministicInts", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "NonDeterministicInts", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def rng_read_and_skip(resource: Annotated[Any, _atypes.Resource], alg: Annotated[Any, _atypes.Int32], delta: Annotated[Any, _atypes.UInt64], name=None) -> Annotated[Any, _atypes.Int64]: + r"""Advance the counter of a counter-based RNG. + + The state of the RNG after + `rng_read_and_skip(n)` will be the same as that after `uniform([n])` + (or any other distribution). The actual increment added to the + counter is an unspecified implementation choice. 
+ + In the case that the input algorithm is RNG_ALG_AUTO_SELECT, the counter in the state needs to be of size int64[2], the current maximal counter size among algorithms. In this case, this op will manage the counter as if it is an 128-bit integer with layout [lower_64bits, higher_64bits]. If an algorithm needs less than 128 bits for the counter, it should use the left portion of the int64[2]. In this way, the int64[2] is compatible with all current RNG algorithms (Philox, ThreeFry and xla::RandomAlgorithm::RNG_DEFAULT). Downstream RNG ops can thus use this counter with any RNG algorithm. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. The state consists of the counter followed by the key. + alg: A `Tensor` of type `int32`. The RNG algorithm. + delta: A `Tensor` of type `uint64`. The amount of advancement. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RngReadAndSkip", name, resource, alg, delta) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rng_read_and_skip_eager_fallback( + resource, alg, delta, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RngReadAndSkip", resource=resource, alg=alg, delta=delta, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RngReadAndSkip", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RngReadAndSkip = tf_export("raw_ops.RngReadAndSkip")(_ops.to_raw_op(rng_read_and_skip)) + + +def rng_read_and_skip_eager_fallback(resource: Annotated[Any, _atypes.Resource], alg: Annotated[Any, _atypes.Int32], delta: Annotated[Any, _atypes.UInt64], name, ctx) -> Annotated[Any, _atypes.Int64]: + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + alg = _ops.convert_to_tensor(alg, _dtypes.int32) + delta = _ops.convert_to_tensor(delta, _dtypes.uint64) + _inputs_flat = [resource, alg, delta] + _attrs = None + _result = _execute.execute(b"RngReadAndSkip", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RngReadAndSkip", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def rng_skip(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], delta: Annotated[Any, _atypes.Int64], name=None): + r"""Advance the counter of a counter-based RNG. + + The state of the RNG after + `rng_skip(n)` will be the same as that after `stateful_uniform([n])` + (or any other distribution). The actual increment added to the + counter is an unspecified implementation detail. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + delta: A `Tensor` of type `int64`. The amount of advancement. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RngSkip", name, resource, algorithm, delta) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return rng_skip_eager_fallback( + resource, algorithm, delta, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RngSkip", resource=resource, algorithm=algorithm, delta=delta, + name=name) + return _op +RngSkip = tf_export("raw_ops.RngSkip")(_ops.to_raw_op(rng_skip)) + + +def rng_skip_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], delta: Annotated[Any, _atypes.Int64], name, ctx): + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + delta = _ops.convert_to_tensor(delta, _dtypes.int64) + _inputs_flat = [resource, algorithm, delta] + _attrs = None + _result = _execute.execute(b"RngSkip", 0, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + _result = None + return _result + + +TV_StatefulRandomBinomial_S = TypeVar("TV_StatefulRandomBinomial_S", _atypes.Int32, _atypes.Int64) +TV_StatefulRandomBinomial_T = TypeVar("TV_StatefulRandomBinomial_T", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) +TV_StatefulRandomBinomial_dtype = TypeVar("TV_StatefulRandomBinomial_dtype", _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.Int64) + +def stateful_random_binomial(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulRandomBinomial_S], counts: Annotated[Any, TV_StatefulRandomBinomial_T], probs: Annotated[Any, 
TV_StatefulRandomBinomial_T], dtype:TV_StatefulRandomBinomial_dtype=_dtypes.int64, name=None) -> Annotated[Any, TV_StatefulRandomBinomial_dtype]: + r"""TODO: add doc. + + Args: + resource: A `Tensor` of type `resource`. + algorithm: A `Tensor` of type `int64`. + shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. + counts: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`. + probs: A `Tensor`. Must have the same type as `counts`. + dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulRandomBinomial", name, resource, algorithm, shape, + counts, probs, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_random_binomial_eager_fallback( + resource, algorithm, shape, counts, probs, dtype=dtype, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulRandomBinomial", resource=resource, algorithm=algorithm, + shape=shape, counts=counts, probs=probs, + dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("S", _op._get_attr_type("S"), "T", _op._get_attr_type("T"), + "dtype", _op._get_attr_type("dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulRandomBinomial", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulRandomBinomial = tf_export("raw_ops.StatefulRandomBinomial")(_ops.to_raw_op(stateful_random_binomial)) + + +def stateful_random_binomial_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulRandomBinomial_S], counts: Annotated[Any, TV_StatefulRandomBinomial_T], probs: Annotated[Any, TV_StatefulRandomBinomial_T], dtype: TV_StatefulRandomBinomial_dtype, name, ctx) -> Annotated[Any, TV_StatefulRandomBinomial_dtype]: + if dtype is None: + dtype = _dtypes.int64 + dtype = _execute.make_type(dtype, "dtype") + _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ]) + _attr_T, _inputs_T = _execute.args_to_matching_eager([counts, probs], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ], _dtypes.float64) + (counts, probs) = _inputs_T + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape, counts, probs] + _attrs = ("S", _attr_S, "T", _attr_T, "dtype", dtype) + _result = _execute.execute(b"StatefulRandomBinomial", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulRandomBinomial", _inputs_flat, _attrs, 
_result) + _result, = _result + return _result + + +TV_StatefulStandardNormal_dtype = TypeVar("TV_StatefulStandardNormal_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulStandardNormal_shape_dtype = TypeVar("TV_StatefulStandardNormal_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_standard_normal(resource: Annotated[Any, _atypes.Resource], shape: Annotated[Any, TV_StatefulStandardNormal_shape_dtype], dtype:TV_StatefulStandardNormal_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatefulStandardNormal_dtype]: + r"""Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2' + + The generated values will have mean 0 and standard deviation 1. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + shape: A `Tensor`. The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulStandardNormal", name, resource, shape, "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_standard_normal_eager_fallback( + resource, shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulStandardNormal", resource=resource, shape=shape, dtype=dtype, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulStandardNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulStandardNormal = tf_export("raw_ops.StatefulStandardNormal")(_ops.to_raw_op(stateful_standard_normal)) + + +def stateful_standard_normal_eager_fallback(resource: Annotated[Any, _atypes.Resource], shape: Annotated[Any, TV_StatefulStandardNormal_shape_dtype], dtype: TV_StatefulStandardNormal_dtype, name, ctx) -> Annotated[Any, TV_StatefulStandardNormal_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + _inputs_flat = [resource, shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"StatefulStandardNormal", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "StatefulStandardNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatefulStandardNormalV2_dtype = TypeVar("TV_StatefulStandardNormalV2_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulStandardNormalV2_shape_dtype = TypeVar("TV_StatefulStandardNormalV2_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_standard_normal_v2(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulStandardNormalV2_shape_dtype], dtype:TV_StatefulStandardNormalV2_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatefulStandardNormalV2_dtype]: + r"""Outputs random values from a normal distribution. + + The generated values will have mean 0 and standard deviation 1. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + shape: A `Tensor`. The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.float32`. 
+ The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulStandardNormalV2", name, resource, algorithm, shape, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_standard_normal_v2_eager_fallback( + resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulStandardNormalV2", resource=resource, algorithm=algorithm, + shape=shape, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulStandardNormalV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulStandardNormalV2 = tf_export("raw_ops.StatefulStandardNormalV2")(_ops.to_raw_op(stateful_standard_normal_v2)) + + +def stateful_standard_normal_v2_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulStandardNormalV2_shape_dtype], dtype: TV_StatefulStandardNormalV2_dtype, name, ctx) -> Annotated[Any, TV_StatefulStandardNormalV2_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + resource = _ops.convert_to_tensor(resource, 
_dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"StatefulStandardNormalV2", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulStandardNormalV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatefulTruncatedNormal_dtype = TypeVar("TV_StatefulTruncatedNormal_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulTruncatedNormal_shape_dtype = TypeVar("TV_StatefulTruncatedNormal_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_truncated_normal(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulTruncatedNormal_shape_dtype], dtype:TV_StatefulTruncatedNormal_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatefulTruncatedNormal_dtype]: + r"""Outputs random values from a truncated normal distribution. 
+ + The generated values follow a normal distribution with mean 0 and standard + deviation 1, except that values whose magnitude is more than 2 standard + deviations from the mean are dropped and re-picked. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + shape: A `Tensor`. The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulTruncatedNormal", name, resource, algorithm, shape, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_truncated_normal_eager_fallback( + resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulTruncatedNormal", resource=resource, algorithm=algorithm, + shape=shape, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulTruncatedNormal = tf_export("raw_ops.StatefulTruncatedNormal")(_ops.to_raw_op(stateful_truncated_normal)) + + +def stateful_truncated_normal_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulTruncatedNormal_shape_dtype], dtype: TV_StatefulTruncatedNormal_dtype, name, ctx) -> Annotated[Any, TV_StatefulTruncatedNormal_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"StatefulTruncatedNormal", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulTruncatedNormal", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatefulUniform_dtype = TypeVar("TV_StatefulUniform_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, 
_atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulUniform_shape_dtype = TypeVar("TV_StatefulUniform_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_uniform(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniform_shape_dtype], dtype:TV_StatefulUniform_dtype=_dtypes.float32, name=None) -> Annotated[Any, TV_StatefulUniform_dtype]: + r"""Outputs random values from a uniform distribution. + + The generated values follow a uniform distribution in the range `[0, 1)`. The + lower bound 0 is included in the range, while the upper bound 1 is excluded. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + shape: A `Tensor`. The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.float32`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulUniform", name, resource, algorithm, shape, "dtype", + dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_uniform_eager_fallback( + resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulUniform", resource=resource, algorithm=algorithm, + shape=shape, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulUniform = tf_export("raw_ops.StatefulUniform")(_ops.to_raw_op(stateful_uniform)) + + +def stateful_uniform_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniform_shape_dtype], dtype: TV_StatefulUniform_dtype, name, ctx) -> Annotated[Any, TV_StatefulUniform_dtype]: + if dtype is None: + dtype = _dtypes.float32 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = 
_execute.execute(b"StatefulUniform", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulUniform", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatefulUniformFullInt_dtype = TypeVar("TV_StatefulUniformFullInt_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulUniformFullInt_shape_dtype = TypeVar("TV_StatefulUniformFullInt_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_uniform_full_int(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniformFullInt_shape_dtype], dtype:TV_StatefulUniformFullInt_dtype=_dtypes.uint64, name=None) -> Annotated[Any, TV_StatefulUniformFullInt_dtype]: + r"""Outputs random integers from a uniform distribution. + + The generated values are uniform integers covering the whole range of `dtype`. + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + shape: A `Tensor`. 
The shape of the output tensor. + dtype: An optional `tf.DType`. Defaults to `tf.uint64`. + The type of the output. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulUniformFullInt", name, resource, algorithm, shape, + "dtype", dtype) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_uniform_full_int_eager_fallback( + resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if dtype is None: + dtype = _dtypes.uint64 + dtype = _execute.make_type(dtype, "dtype") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulUniformFullInt", resource=resource, algorithm=algorithm, + shape=shape, dtype=dtype, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulUniformFullInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulUniformFullInt = tf_export("raw_ops.StatefulUniformFullInt")(_ops.to_raw_op(stateful_uniform_full_int)) + + +def stateful_uniform_full_int_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniformFullInt_shape_dtype], dtype: TV_StatefulUniformFullInt_dtype, name, ctx) -> Annotated[Any, TV_StatefulUniformFullInt_dtype]: + if dtype is None: + dtype = _dtypes.uint64 + dtype = _execute.make_type(dtype, "dtype") + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], 
_dtypes.int64) + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape] + _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"StatefulUniformFullInt", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulUniformFullInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_StatefulUniformInt_dtype = TypeVar("TV_StatefulUniformInt_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_StatefulUniformInt_shape_dtype = TypeVar("TV_StatefulUniformInt_shape_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def stateful_uniform_int(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniformInt_shape_dtype], minval: Annotated[Any, TV_StatefulUniformInt_dtype], maxval: Annotated[Any, TV_StatefulUniformInt_dtype], name=None) -> Annotated[Any, TV_StatefulUniformInt_dtype]: + r"""Outputs random integers from a uniform 
distribution. + + The generated values are uniform integers in the range `[minval, maxval)`. + The lower bound `minval` is included in the range, while the upper bound + `maxval` is excluded. + + The random integers are slightly biased unless `maxval - minval` is an exact + power of two. The bias is small for values of `maxval - minval` significantly + smaller than the range of the output (either `2^32` or `2^64`). + + Args: + resource: A `Tensor` of type `resource`. + The handle of the resource variable that stores the state of the RNG. + algorithm: A `Tensor` of type `int64`. The RNG algorithm. + shape: A `Tensor`. The shape of the output tensor. + minval: A `Tensor`. Minimum value (inclusive, scalar). + maxval: A `Tensor`. Must have the same type as `minval`. + Maximum value (exclusive, scalar). + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `minval`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "StatefulUniformInt", name, resource, algorithm, shape, minval, + maxval) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return stateful_uniform_int_eager_fallback( + resource, algorithm, shape, minval, maxval, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "StatefulUniformInt", resource=resource, algorithm=algorithm, + shape=shape, minval=minval, maxval=maxval, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype", + _op._get_attr_type("shape_dtype")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "StatefulUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +StatefulUniformInt = tf_export("raw_ops.StatefulUniformInt")(_ops.to_raw_op(stateful_uniform_int)) + + +def stateful_uniform_int_eager_fallback(resource: Annotated[Any, _atypes.Resource], algorithm: Annotated[Any, _atypes.Int64], shape: Annotated[Any, TV_StatefulUniformInt_shape_dtype], minval: Annotated[Any, TV_StatefulUniformInt_dtype], maxval: Annotated[Any, TV_StatefulUniformInt_dtype], name, ctx) -> Annotated[Any, TV_StatefulUniformInt_dtype]: + _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([minval, maxval], ctx, [], _dtypes.int64) + (minval, maxval) = _inputs_dtype + _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64) + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64) + _inputs_flat = [resource, algorithm, shape, minval, maxval] + _attrs = ("dtype", _attr_dtype, "shape_dtype", _attr_shape_dtype) + _result = _execute.execute(b"StatefulUniformInt", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "StatefulUniformInt", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d31d1dfc52bff8055bfbe9fb0b9609bdb73709 --- 
/dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_tpu_ops.py @@ -0,0 +1,6675 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_AllToAll_T = TypeVar("TV_AllToAll_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +def all_to_all(input: Annotated[Any, TV_AllToAll_T], group_assignment: Annotated[Any, _atypes.Int32], concat_dimension: int, split_dimension: int, split_count: int, name=None) -> Annotated[Any, TV_AllToAll_T]: + r"""An Op to exchange data across TPU replicas. + + On each replica, the input is split into `split_count` blocks along + `split_dimension` and send to the other replicas given group_assignment. After + receiving `split_count` - 1 blocks from other replicas, we concatenate the + blocks along `concat_dimension` as the output. 
+ + For example, suppose there are 2 TPU replicas: + replica 0 receives input: `[[A, B]]` + replica 1 receives input: `[[C, D]]` + + group_assignment=`[[0, 1]]` + concat_dimension=0 + split_dimension=1 + split_count=2 + + replica 0's output: `[[A], [C]]` + replica 1's output: `[[B], [D]]` + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`. + The local input to the sum. + group_assignment: A `Tensor` of type `int32`. An int32 tensor with shape + [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the + replica ids in the ith subgroup. + concat_dimension: An `int`. The dimension number to concatenate. + split_dimension: An `int`. The dimension number to split. + split_count: An `int`. + The number of splits, this number must equal to the sub-group + size(group_assignment.get_shape()[1]) + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AllToAll", name, input, group_assignment, "concat_dimension", + concat_dimension, "split_dimension", split_dimension, "split_count", + split_count) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return all_to_all_eager_fallback( + input, group_assignment, concat_dimension=concat_dimension, + split_dimension=split_dimension, split_count=split_count, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ concat_dimension = _execute.make_int(concat_dimension, "concat_dimension") + split_dimension = _execute.make_int(split_dimension, "split_dimension") + split_count = _execute.make_int(split_count, "split_count") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AllToAll", input=input, group_assignment=group_assignment, + concat_dimension=concat_dimension, + split_dimension=split_dimension, split_count=split_count, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "concat_dimension", + _op._get_attr_int("concat_dimension"), "split_dimension", + _op._get_attr_int("split_dimension"), "split_count", + _op._get_attr_int("split_count")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AllToAll", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AllToAll = tf_export("raw_ops.AllToAll")(_ops.to_raw_op(all_to_all)) + + +def all_to_all_eager_fallback(input: Annotated[Any, TV_AllToAll_T], group_assignment: Annotated[Any, _atypes.Int32], concat_dimension: int, split_dimension: int, split_count: int, name, ctx) -> Annotated[Any, TV_AllToAll_T]: + concat_dimension = _execute.make_int(concat_dimension, "concat_dimension") + split_dimension = _execute.make_int(split_dimension, "split_dimension") + split_count = _execute.make_int(split_count, "split_count") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ]) + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + _inputs_flat = [input, group_assignment] + _attrs = ("T", _attr_T, "concat_dimension", concat_dimension, + "split_dimension", split_dimension, 
"split_count", split_count) + _result = _execute.execute(b"AllToAll", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AllToAll", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_AssignVariableXlaConcatND_T = TypeVar("TV_AssignVariableXlaConcatND_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def assign_variable_xla_concat_nd(resource: Annotated[Any, _atypes.Resource], inputs: Annotated[List[Any], TV_AssignVariableXlaConcatND_T], num_concats, paddings=[], name=None): + r"""Concats input tensor across all dimensions. + + An op which merges slices the input tensor based on the given num_splits + attribute, strips paddings optionally, and writes the merged tensor without + paddings to the resource variable. + + This op may be generated via the TPU bridge. + + For example, with `input` tensor: + ``` + [[0, 1], + [4, 5]] + [[2, 3], + [6, 7]] + [[8, 9], + [12, 13]] + [[10, 11], + [14, 15]] + ``` + `num_splits`: + ``` + [2, 2] + ``` + and `paddings`: + ``` + [1, 1] + ``` + the expected `outputs` is: + ``` + [[0, 1, 2], + [4, 5, 6], + [8, 9, 10]] + ``` + + Args: + resource: A `Tensor` of type `resource`. + Resource variable for concatenated input tensors across all dimensions. + } + in_arg { + name: "inputs" + description: < Annotated[Any, TV_CollectivePermute_T]: + r"""An Op to permute tensors across replicated TPU instances. + + Each instance supplies its own input. + + For example, suppose there are 4 TPU instances: `[A, B, C, D]`. 
Passing + source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: + `[D, A, B, C]`. + + Args: + input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + The local input to be permuted. Currently only supports float and + bfloat16. + source_target_pairs: A `Tensor` of type `int32`. + A tensor with shape [num_pairs, 2]. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CollectivePermute", name, input, source_target_pairs) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return collective_permute_eager_fallback( + input, source_target_pairs, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CollectivePermute", input=input, + source_target_pairs=source_target_pairs, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CollectivePermute", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CollectivePermute = tf_export("raw_ops.CollectivePermute")(_ops.to_raw_op(collective_permute)) + + +def collective_permute_eager_fallback(input: Annotated[Any, TV_CollectivePermute_T], source_target_pairs: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_CollectivePermute_T]: + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + source_target_pairs = _ops.convert_to_tensor(source_target_pairs, _dtypes.int32) + _inputs_flat = [input, source_target_pairs] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"CollectivePermute", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CollectivePermute", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def configure_distributed_tpu(embedding_config:str="", tpu_embedding_config:str="", is_global_init:bool=False, enable_whole_mesh_compilations:bool=False, compilation_failure_closes_chips:bool=True, tpu_cancellation_closes_chips:int=0, name=None) -> Annotated[Any, _atypes.String]: + r"""Sets up the centralized structures for a distributed TPU system. + + Args: + embedding_config: An optional `string`. Defaults to `""`. + Reserved. Do not use. + tpu_embedding_config: An optional `string`. 
Defaults to `""`. + Serialized tensorflow.tpu.TPUEmbeddingConfiguration that + describes the embedding lookups of the program. + is_global_init: An optional `bool`. Defaults to `False`. + Reserved. Do not use. + enable_whole_mesh_compilations: An optional `bool`. Defaults to `False`. + compilation_failure_closes_chips: An optional `bool`. Defaults to `True`. + tpu_cancellation_closes_chips: An optional `int`. Defaults to `0`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ConfigureDistributedTPU", name, "embedding_config", + embedding_config, "tpu_embedding_config", tpu_embedding_config, + "is_global_init", is_global_init, "enable_whole_mesh_compilations", + enable_whole_mesh_compilations, "compilation_failure_closes_chips", + compilation_failure_closes_chips, "tpu_cancellation_closes_chips", + tpu_cancellation_closes_chips) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return configure_distributed_tpu_eager_fallback( + embedding_config=embedding_config, + tpu_embedding_config=tpu_embedding_config, + is_global_init=is_global_init, + enable_whole_mesh_compilations=enable_whole_mesh_compilations, + compilation_failure_closes_chips=compilation_failure_closes_chips, + tpu_cancellation_closes_chips=tpu_cancellation_closes_chips, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if embedding_config is None: + embedding_config = "" + embedding_config = _execute.make_str(embedding_config, "embedding_config") + if tpu_embedding_config is None: + tpu_embedding_config = "" + tpu_embedding_config = _execute.make_str(tpu_embedding_config, "tpu_embedding_config") + if is_global_init is None: + is_global_init = False + is_global_init = _execute.make_bool(is_global_init, "is_global_init") + if enable_whole_mesh_compilations is None: + enable_whole_mesh_compilations = False + enable_whole_mesh_compilations = _execute.make_bool(enable_whole_mesh_compilations, "enable_whole_mesh_compilations") + if compilation_failure_closes_chips is None: + compilation_failure_closes_chips = True + compilation_failure_closes_chips = _execute.make_bool(compilation_failure_closes_chips, "compilation_failure_closes_chips") + if tpu_cancellation_closes_chips is None: + tpu_cancellation_closes_chips = 0 + tpu_cancellation_closes_chips = _execute.make_int(tpu_cancellation_closes_chips, "tpu_cancellation_closes_chips") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ConfigureDistributedTPU", embedding_config=embedding_config, + tpu_embedding_config=tpu_embedding_config, + is_global_init=is_global_init, + enable_whole_mesh_compilations=enable_whole_mesh_compilations, + compilation_failure_closes_chips=compilation_failure_closes_chips, + tpu_cancellation_closes_chips=tpu_cancellation_closes_chips, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("embedding_config", _op.get_attr("embedding_config"), + "tpu_embedding_config", _op.get_attr("tpu_embedding_config"), + "is_global_init", _op._get_attr_bool("is_global_init"), + "enable_whole_mesh_compilations", + _op._get_attr_bool("enable_whole_mesh_compilations"), + "compilation_failure_closes_chips", + _op._get_attr_bool("compilation_failure_closes_chips"), + "tpu_cancellation_closes_chips", + _op._get_attr_int("tpu_cancellation_closes_chips")) + _inputs_flat = _op.inputs + 
_execute.record_gradient( + "ConfigureDistributedTPU", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +ConfigureDistributedTPU = tf_export("raw_ops.ConfigureDistributedTPU")(_ops.to_raw_op(configure_distributed_tpu)) + + +def configure_distributed_tpu_eager_fallback(embedding_config: str, tpu_embedding_config: str, is_global_init: bool, enable_whole_mesh_compilations: bool, compilation_failure_closes_chips: bool, tpu_cancellation_closes_chips: int, name, ctx) -> Annotated[Any, _atypes.String]: + if embedding_config is None: + embedding_config = "" + embedding_config = _execute.make_str(embedding_config, "embedding_config") + if tpu_embedding_config is None: + tpu_embedding_config = "" + tpu_embedding_config = _execute.make_str(tpu_embedding_config, "tpu_embedding_config") + if is_global_init is None: + is_global_init = False + is_global_init = _execute.make_bool(is_global_init, "is_global_init") + if enable_whole_mesh_compilations is None: + enable_whole_mesh_compilations = False + enable_whole_mesh_compilations = _execute.make_bool(enable_whole_mesh_compilations, "enable_whole_mesh_compilations") + if compilation_failure_closes_chips is None: + compilation_failure_closes_chips = True + compilation_failure_closes_chips = _execute.make_bool(compilation_failure_closes_chips, "compilation_failure_closes_chips") + if tpu_cancellation_closes_chips is None: + tpu_cancellation_closes_chips = 0 + tpu_cancellation_closes_chips = _execute.make_int(tpu_cancellation_closes_chips, "tpu_cancellation_closes_chips") + _inputs_flat = [] + _attrs = ("embedding_config", embedding_config, "tpu_embedding_config", + tpu_embedding_config, "is_global_init", is_global_init, + "enable_whole_mesh_compilations", enable_whole_mesh_compilations, + "compilation_failure_closes_chips", compilation_failure_closes_chips, + "tpu_cancellation_closes_chips", tpu_cancellation_closes_chips) + _result = _execute.execute(b"ConfigureDistributedTPU", 1, + inputs=_inputs_flat, 
attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ConfigureDistributedTPU", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def configure_tpu_embedding(config: str, name=None): + r"""Sets up TPUEmbedding in a distributed TPU system. + + Args: + config: A `string`. + Serialized tensorflow.tpu.TPUEmbeddingConfiguration that + describes the embedding lookups of the program. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ConfigureTPUEmbedding", name, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return configure_tpu_embedding_eager_fallback( + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ConfigureTPUEmbedding", config=config, name=name) + return _op +ConfigureTPUEmbedding = tf_export("raw_ops.ConfigureTPUEmbedding")(_ops.to_raw_op(configure_tpu_embedding)) + + +def configure_tpu_embedding_eager_fallback(config: str, name, ctx): + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("config", config) + _result = _execute.execute(b"ConfigureTPUEmbedding", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_CrossReplicaSum_T = TypeVar("TV_CrossReplicaSum_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int32, _atypes.UInt32) + +def cross_replica_sum(input: Annotated[Any, TV_CrossReplicaSum_T], group_assignment: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_CrossReplicaSum_T]: + r"""An Op to sum inputs across replicated TPU instances. + + Each instance supplies its own input. + + For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`. + Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0, + and `B, D, F, H` as group 1. Thus we get the outputs: + `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`. + + Args: + input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `uint32`. + The local input to the sum. + group_assignment: A `Tensor` of type `int32`. An int32 tensor with shape + [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the + replica ids in the ith subgroup. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CrossReplicaSum", name, input, group_assignment) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return cross_replica_sum_eager_fallback( + input, group_assignment, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CrossReplicaSum", input=input, group_assignment=group_assignment, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CrossReplicaSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CrossReplicaSum = tf_export("raw_ops.CrossReplicaSum")(_ops.to_raw_op(cross_replica_sum)) + + +def cross_replica_sum_eager_fallback(input: Annotated[Any, TV_CrossReplicaSum_T], group_assignment: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_CrossReplicaSum_T]: + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint32, ]) + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + _inputs_flat = [input, group_assignment] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"CrossReplicaSum", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CrossReplicaSum", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T1 = TypeVar("TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T1", _atypes.Int32, _atypes.Int64) 
+TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T2 = TypeVar("TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T2", _atypes.Int32, _atypes.Int64) +TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T3 = TypeVar("TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T3", _atypes.Float32, _atypes.Float64) + +def dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: Annotated[Any, _atypes.Int32], combiners=[], name=None): + r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + + embedding_indices[i] and aggregation_weights[i] correspond + to the ith feature. + + The tensors at corresponding positions in the three input lists (sample_indices, + embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + with dim_size() equal to the total number of lookups into the table described by + the corresponding feature. + + Args: + sample_indices_or_row_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 2 Tensors specifying the training example to which the + corresponding embedding_indices and aggregation_weights values belong. + If the size of its first dimension is 0, we assume each embedding_indices + belongs to a different sample. Both int32 and int64 are allowed and will + be converted to int32 internally. + + Or a list of rank 1 Tensors specifying the row splits for splitting + embedding_indices and aggregation_weights into rows. It corresponds to + ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When + enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. 
+ the row splits is 1-D dense tensor. When empty, we assume a dense tensor is + passed to the op Both int32 and int64 are allowed and will be converted to + int32 internally. + embedding_indices: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors, indices into the embedding + tables. Both int32 and int64 are allowed and will be converted to + int32 internally. + aggregation_weights: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `float32`, `float64`. + A list of rank 1 Tensors containing per training + example aggregation weights. Both float32 and float64 are allowed and will + be converted to float32 internally. + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + device_ordinal: A `Tensor` of type `int32`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. + combiners: An optional list of `strings`. Defaults to `[]`. + A list of string scalars, one for each embedding table that specify + how to normalize the embedding activations after weighted summation. + Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + the sum of the weights be 0 for 'mean' or the sum of the squared weights be + 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + all tables. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", name, + sample_indices_or_row_splits, embedding_indices, aggregation_weights, + mode_override, device_ordinal, "combiners", combiners) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback( + sample_indices_or_row_splits, embedding_indices, + aggregation_weights, mode_override, device_ordinal, + combiners=combiners, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sample_indices_or_row_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices_or_row_splits' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) + _attr_N = len(sample_indices_or_row_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(aggregation_weights), _attr_N)) + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", sample_indices_or_row_splits=sample_indices_or_row_splits, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + device_ordinal=device_ordinal, + combiners=combiners, + name=name) + return _op +DynamicEnqueueTPUEmbeddingArbitraryTensorBatch = tf_export("raw_ops.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch")(_ops.to_raw_op(dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch)) + + +def dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback(sample_indices_or_row_splits: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: Annotated[Any, _atypes.Int32], combiners, name, ctx): + if not isinstance(sample_indices_or_row_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices_or_row_splits' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% sample_indices_or_row_splits) + _attr_N = len(sample_indices_or_row_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(aggregation_weights), _attr_N)) + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _attr_T1, sample_indices_or_row_splits = _execute.args_to_matching_eager(list(sample_indices_or_row_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) + _inputs_flat = list(sample_indices_or_row_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override, device_ordinal] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "combiners", combiners) + _result = _execute.execute(b"DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T1 = TypeVar("TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T1", _atypes.Int32, _atypes.Int64) +TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T2 = TypeVar("TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T2", _atypes.Int32, _atypes.Int64) +TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T3 = TypeVar("TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T3", _atypes.Float32, _atypes.Float64) + +def dynamic_enqueue_tpu_embedding_ragged_tensor_batch(sample_splits: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: Annotated[Any, _atypes.Int32], table_ids, combiners=[], 
max_sequence_lengths=[], num_features=[], name=None): + r"""TODO: add doc. + + Args: + sample_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + embedding_indices: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `int32`, `int64`. + aggregation_weights: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `float32`, `float64`. + mode_override: A `Tensor` of type `string`. + device_ordinal: A `Tensor` of type `int32`. + table_ids: A list of `ints`. + combiners: An optional list of `strings`. Defaults to `[]`. + max_sequence_lengths: An optional list of `ints`. Defaults to `[]`. + num_features: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DynamicEnqueueTPUEmbeddingRaggedTensorBatch", name, + sample_splits, embedding_indices, aggregation_weights, mode_override, + device_ordinal, "combiners", combiners, "table_ids", table_ids, + "max_sequence_lengths", max_sequence_lengths, "num_features", + num_features) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dynamic_enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback( + sample_splits, embedding_indices, aggregation_weights, + mode_override, device_ordinal, combiners=combiners, + table_ids=table_ids, max_sequence_lengths=max_sequence_lengths, + num_features=num_features, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(sample_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_splits' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % sample_splits) + _attr_N = len(sample_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DynamicEnqueueTPUEmbeddingRaggedTensorBatch", sample_splits=sample_splits, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + device_ordinal=device_ordinal, + table_ids=table_ids, + combiners=combiners, + max_sequence_lengths=max_sequence_lengths, + num_features=num_features, + name=name) + return _op +DynamicEnqueueTPUEmbeddingRaggedTensorBatch = tf_export("raw_ops.DynamicEnqueueTPUEmbeddingRaggedTensorBatch")(_ops.to_raw_op(dynamic_enqueue_tpu_embedding_ragged_tensor_batch)) + + +def dynamic_enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback(sample_splits: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_DynamicEnqueueTPUEmbeddingRaggedTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: Annotated[Any, _atypes.Int32], table_ids, combiners, max_sequence_lengths, num_features, name, ctx): + if not isinstance(sample_splits, (list, tuple)): + raise TypeError( + 
"Expected list for 'sample_splits' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % sample_splits) + _attr_N = len(sample_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'dynamic_enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _attr_T1, sample_splits = _execute.args_to_matching_eager(list(sample_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) + _inputs_flat = list(sample_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override, device_ordinal] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "combiners", combiners, "table_ids", table_ids, "max_sequence_lengths", + max_sequence_lengths, "num_features", num_features) + _result = _execute.execute(b"DynamicEnqueueTPUEmbeddingRaggedTensorBatch", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T1 = TypeVar("TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T1", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T2 = TypeVar("TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T2", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T3 = TypeVar("TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T3", _atypes.Float32, _atypes.Float64) + +def 
enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal:int=-1, combiners=[], name=None): + r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + + embedding_indices[i] and aggregation_weights[i] correspond + to the ith feature. + + The tensors at corresponding positions in the three input lists (sample_indices, + embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + with dim_size() equal to the total number of lookups into the table described by + the corresponding feature. + + Args: + sample_indices_or_row_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 2 Tensors specifying the training example to which the + corresponding embedding_indices and aggregation_weights values belong. + If the size of its first dimension is 0, we assume each embedding_indices + belongs to a different sample. Both int32 and int64 are allowed and will + be converted to int32 internally. + + Or a list of rank 1 Tensors specifying the row splits for splitting + embedding_indices and aggregation_weights into rows. It corresponds to + ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When + enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + the row splits is 1-D dense tensor. When empty, we assume a dense tensor is + passed to the op Both int32 and int64 are allowed and will be converted to + int32 internally. + embedding_indices: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors, indices into the embedding + tables. 
Both int32 and int64 are allowed and will be converted to + int32 internally. + aggregation_weights: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `float32`, `float64`. + A list of rank 1 Tensors containing per training + example aggregation weights. Both float32 and float64 are allowed and will + be converted to float32 internally. + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. + combiners: An optional list of `strings`. Defaults to `[]`. + A list of string scalars, one for each embedding table that specify + how to normalize the embedding activations after weighted summation. + Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + the sum of the weights be 0 for 'mean' or the sum of the squared weights be + 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + all tables. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EnqueueTPUEmbeddingArbitraryTensorBatch", name, + sample_indices_or_row_splits, embedding_indices, aggregation_weights, + mode_override, "device_ordinal", device_ordinal, "combiners", + combiners) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback( + sample_indices_or_row_splits, embedding_indices, + aggregation_weights, mode_override, device_ordinal=device_ordinal, + combiners=combiners, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sample_indices_or_row_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices_or_row_splits' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) + _attr_N = len(sample_indices_or_row_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(aggregation_weights), _attr_N)) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EnqueueTPUEmbeddingArbitraryTensorBatch", sample_indices_or_row_splits=sample_indices_or_row_splits, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + device_ordinal=device_ordinal, + combiners=combiners, + name=name) + return _op +EnqueueTPUEmbeddingArbitraryTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingArbitraryTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_arbitrary_tensor_batch)) + + +def enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback(sample_indices_or_row_splits: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingArbitraryTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: int, combiners, name, ctx): + if not isinstance(sample_indices_or_row_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices_or_row_splits' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% sample_indices_or_row_splits) + _attr_N = len(sample_indices_or_row_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices_or_row_splits'." % + (len(aggregation_weights), _attr_N)) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." 
% combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _attr_T1, sample_indices_or_row_splits = _execute.args_to_matching_eager(list(sample_indices_or_row_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + _inputs_flat = list(sample_indices_or_row_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "device_ordinal", device_ordinal, "combiners", combiners) + _result = _execute.execute(b"EnqueueTPUEmbeddingArbitraryTensorBatch", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def enqueue_tpu_embedding_integer_batch(batch: Annotated[List[Any], _atypes.Int32], mode_override: Annotated[Any, _atypes.String], device_ordinal:int=-1, name=None): + r"""An op that enqueues a list of input batch tensors to TPUEmbedding. + + Args: + batch: A list of at least 1 `Tensor` objects with type `int32`. + A list of 1D tensors, one for each embedding table, containing the + indices into the tables. + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. 
+ name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EnqueueTPUEmbeddingIntegerBatch", name, batch, mode_override, + "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return enqueue_tpu_embedding_integer_batch_eager_fallback( + batch, mode_override, device_ordinal=device_ordinal, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(batch, (list, tuple)): + raise TypeError( + "Expected list for 'batch' argument to " + "'enqueue_tpu_embedding_integer_batch' Op, not %r." % batch) + _attr_N = len(batch) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EnqueueTPUEmbeddingIntegerBatch", batch=batch, + mode_override=mode_override, + device_ordinal=device_ordinal, + name=name) + return _op +EnqueueTPUEmbeddingIntegerBatch = tf_export("raw_ops.EnqueueTPUEmbeddingIntegerBatch")(_ops.to_raw_op(enqueue_tpu_embedding_integer_batch)) + + +def enqueue_tpu_embedding_integer_batch_eager_fallback(batch: Annotated[List[Any], _atypes.Int32], mode_override: Annotated[Any, _atypes.String], device_ordinal: int, name, ctx): + if not isinstance(batch, (list, tuple)): + raise TypeError( + "Expected list for 'batch' argument to " + "'enqueue_tpu_embedding_integer_batch' Op, not %r." 
% batch) + _attr_N = len(batch) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + batch = _ops.convert_n_to_tensor(batch, _dtypes.int32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + _inputs_flat = list(batch) + [mode_override] + _attrs = ("N", _attr_N, "device_ordinal", device_ordinal) + _result = _execute.execute(b"EnqueueTPUEmbeddingIntegerBatch", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_EnqueueTPUEmbeddingRaggedTensorBatch_T1 = TypeVar("TV_EnqueueTPUEmbeddingRaggedTensorBatch_T1", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingRaggedTensorBatch_T2 = TypeVar("TV_EnqueueTPUEmbeddingRaggedTensorBatch_T2", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingRaggedTensorBatch_T3 = TypeVar("TV_EnqueueTPUEmbeddingRaggedTensorBatch_T3", _atypes.Float32, _atypes.Float64) + +def enqueue_tpu_embedding_ragged_tensor_batch(sample_splits: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], table_ids, device_ordinal:int=-1, combiners=[], max_sequence_lengths=[], num_features=[], name=None): + r"""Eases the porting of code that uses tf.nn.embedding_lookup(). + + sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond + to the ith feature. table_ids[i] indicates which embedding table to look up ith + feature. + + The tensors at corresponding positions in two of the input lists, + embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1 + with dim_size() equal to the total number of lookups into the table described by + the corresponding feature. 
+ + Args: + sample_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors specifying the break points for splitting + embedding_indices and aggregation_weights into rows. + It corresponds to ids.row_splits in embedding_lookup(), when ids is a + RaggedTensor. + embedding_indices: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors, indices into the embedding tables. + It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor. + aggregation_weights: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `float32`, `float64`. + A list of rank 1 Tensors containing per training example + aggregation weights. It corresponds to the values field of a RaggedTensor + with the same row_splits as ids in embedding_lookup(), when ids is a + RaggedTensor. + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + table_ids: A list of `ints`. + A list of integers specifying the identifier of the embedding table + (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the + corresponding input. The ith input is looked up using table_ids[i]. The size + of the table_ids list must be equal to that of sample_indices, + embedding_indices and aggregation_weights. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. + combiners: An optional list of `strings`. Defaults to `[]`. 
+ A list of string scalars, one for each embedding table that specify + how to normalize the embedding activations after weighted summation. + Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + the sum of the weights be 0 for 'mean' or the sum of the squared weights be + 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + all tables. + max_sequence_lengths: An optional list of `ints`. Defaults to `[]`. + num_features: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EnqueueTPUEmbeddingRaggedTensorBatch", name, sample_splits, + embedding_indices, aggregation_weights, mode_override, + "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", + table_ids, "max_sequence_lengths", max_sequence_lengths, + "num_features", num_features) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback( + sample_splits, embedding_indices, aggregation_weights, + mode_override, device_ordinal=device_ordinal, combiners=combiners, + table_ids=table_ids, max_sequence_lengths=max_sequence_lengths, + num_features=num_features, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sample_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_splits' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% sample_splits) + _attr_N = len(sample_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EnqueueTPUEmbeddingRaggedTensorBatch", sample_splits=sample_splits, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + table_ids=table_ids, + device_ordinal=device_ordinal, + combiners=combiners, + max_sequence_lengths=max_sequence_lengths, + num_features=num_features, + name=name) + return _op +EnqueueTPUEmbeddingRaggedTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingRaggedTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_ragged_tensor_batch)) + + +def enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback(sample_splits: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingRaggedTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], table_ids, device_ordinal: int, combiners, max_sequence_lengths, num_features, name, ctx): + if not isinstance(sample_splits, (list, tuple)): + raise TypeError( + "Expected list for 'sample_splits' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % sample_splits) + _attr_N = len(sample_splits) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_splits'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." 
% max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _attr_T1, sample_splits = _execute.args_to_matching_eager(list(sample_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + _inputs_flat = list(sample_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", + table_ids, "max_sequence_lengths", max_sequence_lengths, "num_features", + num_features) + _result = _execute.execute(b"EnqueueTPUEmbeddingRaggedTensorBatch", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_EnqueueTPUEmbeddingSparseBatch_T1 = TypeVar("TV_EnqueueTPUEmbeddingSparseBatch_T1", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingSparseBatch_T2 = TypeVar("TV_EnqueueTPUEmbeddingSparseBatch_T2", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingSparseBatch_T3 = TypeVar("TV_EnqueueTPUEmbeddingSparseBatch_T3", _atypes.Float32, _atypes.Float64) + +def enqueue_tpu_embedding_sparse_batch(sample_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseBatch_T1], embedding_indices: Annotated[List[Any], 
TV_EnqueueTPUEmbeddingSparseBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal:int=-1, combiners=[], name=None): + r"""An op that enqueues TPUEmbedding input indices from a SparseTensor. + + This Op eases the porting of code that uses embedding_lookup_sparse(), + although some Python preprocessing of the SparseTensor arguments to + embedding_lookup_sparse() is required to produce the arguments to this Op, + since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training + step. + + The tensors at corresponding positions in the three input lists + must have the same shape, i.e. rank 1 with dim_size() equal to the total + number of lookups into the table described by the corresponding table_id. + + Args: + sample_indices: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors specifying the training example and + feature to which the corresponding embedding_indices and aggregation_weights + values belong. sample_indices[i] must equal b * nf + f, where nf is the + number of features from the corresponding table, f is in [0, nf), and + b is in [0, batch size). + embedding_indices: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors, indices into the embedding tables. + aggregation_weights: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `float32`, `float64`. + A list of rank 1 Tensors containing per sample -- i.e. per + (training example, feature) -- aggregation weights. + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. 
When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. + combiners: An optional list of `strings`. Defaults to `[]`. + A list of string scalars, one for each embedding table that specify + how to normalize the embedding activations after weighted summation. + Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + the sum of the weights be 0 for 'mean' or the sum of the squared weights be + 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + all tables. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EnqueueTPUEmbeddingSparseBatch", name, sample_indices, + embedding_indices, aggregation_weights, mode_override, + "device_ordinal", device_ordinal, "combiners", combiners) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return enqueue_tpu_embedding_sparse_batch_eager_fallback( + sample_indices, embedding_indices, aggregation_weights, + mode_override, device_ordinal=device_ordinal, combiners=combiners, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sample_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." 
% sample_indices) + _attr_N = len(sample_indices) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(aggregation_weights), _attr_N)) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." 
% combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EnqueueTPUEmbeddingSparseBatch", sample_indices=sample_indices, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + device_ordinal=device_ordinal, + combiners=combiners, name=name) + return _op +EnqueueTPUEmbeddingSparseBatch = tf_export("raw_ops.EnqueueTPUEmbeddingSparseBatch")(_ops.to_raw_op(enqueue_tpu_embedding_sparse_batch)) + + +def enqueue_tpu_embedding_sparse_batch_eager_fallback(sample_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseBatch_T3], mode_override: Annotated[Any, _atypes.String], device_ordinal: int, combiners, name, ctx): + if not isinstance(sample_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % sample_indices) + _attr_N = len(sample_indices) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." 
% aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(aggregation_weights), _attr_N)) + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + _attr_T1, sample_indices = _execute.args_to_matching_eager(list(sample_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + _inputs_flat = list(sample_indices) + list(embedding_indices) + list(aggregation_weights) + [mode_override] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "device_ordinal", device_ordinal, "combiners", combiners) + _result = _execute.execute(b"EnqueueTPUEmbeddingSparseBatch", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_EnqueueTPUEmbeddingSparseTensorBatch_T1 = TypeVar("TV_EnqueueTPUEmbeddingSparseTensorBatch_T1", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingSparseTensorBatch_T2 = TypeVar("TV_EnqueueTPUEmbeddingSparseTensorBatch_T2", _atypes.Int32, _atypes.Int64) +TV_EnqueueTPUEmbeddingSparseTensorBatch_T3 = TypeVar("TV_EnqueueTPUEmbeddingSparseTensorBatch_T3", _atypes.Float32, _atypes.Float64) 
+ +def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], table_ids, device_ordinal:int=-1, combiners=[], max_sequence_lengths=[], num_features=[], name=None): + r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + + sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond + to the ith feature. table_ids[i] indicates which embedding table to look up ith + feature. + + The tensors at corresponding positions in the three input lists (sample_indices, + embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + with dim_size() equal to the total number of lookups into the table described by + the corresponding feature. + + Args: + sample_indices: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors specifying the training example to + which the corresponding embedding_indices and aggregation_weights values + belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). + embedding_indices: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `int32`, `int64`. + A list of rank 1 Tensors, indices into the embedding tables. + It corresponds to sp_ids.values in embedding_lookup_sparse(). + aggregation_weights: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `float32`, `float64`. + A list of rank 1 Tensors containing per training example + aggregation weights. It corresponds to sp_weights.values in + embedding_lookup_sparse(). + mode_override: A `Tensor` of type `string`. + A string input that overrides the mode specified in the + TPUEmbeddingConfiguration. 
Supported values are {'unspecified', 'inference', + 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + table_ids: A list of `ints`. + A list of integers specifying the identifier of the embedding table + (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the + corresponding input. The ith input is looked up using table_ids[i]. The size + of the table_ids list must be equal to that of sample_indices, + embedding_indices and aggregation_weights. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. Should be >= 0 and less than the number + of TPU cores in the task on which the node is placed. + combiners: An optional list of `strings`. Defaults to `[]`. + A list of string scalars, one for each embedding table that specify + how to normalize the embedding activations after weighted summation. + Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + the sum of the weights be 0 for 'mean' or the sum of the squared weights be + 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + all tables. + max_sequence_lengths: An optional list of `ints`. Defaults to `[]`. + num_features: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "EnqueueTPUEmbeddingSparseTensorBatch", name, sample_indices, + embedding_indices, aggregation_weights, mode_override, + "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", + table_ids, "max_sequence_lengths", max_sequence_lengths, + "num_features", num_features) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return enqueue_tpu_embedding_sparse_tensor_batch_eager_fallback( + sample_indices, embedding_indices, aggregation_weights, + mode_override, device_ordinal=device_ordinal, combiners=combiners, + table_ids=table_ids, max_sequence_lengths=max_sequence_lengths, + num_features=num_features, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(sample_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % sample_indices) + _attr_N = len(sample_indices) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." 
% aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." 
% num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "EnqueueTPUEmbeddingSparseTensorBatch", sample_indices=sample_indices, + embedding_indices=embedding_indices, + aggregation_weights=aggregation_weights, + mode_override=mode_override, + table_ids=table_ids, + device_ordinal=device_ordinal, + combiners=combiners, + max_sequence_lengths=max_sequence_lengths, + num_features=num_features, + name=name) + return _op +EnqueueTPUEmbeddingSparseTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingSparseTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_sparse_tensor_batch)) + + +def enqueue_tpu_embedding_sparse_tensor_batch_eager_fallback(sample_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T1], embedding_indices: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T2], aggregation_weights: Annotated[List[Any], TV_EnqueueTPUEmbeddingSparseTensorBatch_T3], mode_override: Annotated[Any, _atypes.String], table_ids, device_ordinal: int, combiners, max_sequence_lengths, num_features, name, ctx): + if not isinstance(sample_indices, (list, tuple)): + raise TypeError( + "Expected list for 'sample_indices' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % sample_indices) + _attr_N = len(sample_indices) + if not isinstance(embedding_indices, (list, tuple)): + raise TypeError( + "Expected list for 'embedding_indices' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % embedding_indices) + if len(embedding_indices) != _attr_N: + raise ValueError( + "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." 
% + (len(embedding_indices), _attr_N)) + if not isinstance(aggregation_weights, (list, tuple)): + raise TypeError( + "Expected list for 'aggregation_weights' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % aggregation_weights) + if len(aggregation_weights) != _attr_N: + raise ValueError( + "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " + "must match length %d of argument 'sample_indices'." % + (len(aggregation_weights), _attr_N)) + if not isinstance(table_ids, (list, tuple)): + raise TypeError( + "Expected list for 'table_ids' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % table_ids) + table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + if combiners is None: + combiners = [] + if not isinstance(combiners, (list, tuple)): + raise TypeError( + "Expected list for 'combiners' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % combiners) + combiners = [_execute.make_str(_s, "combiners") for _s in combiners] + if max_sequence_lengths is None: + max_sequence_lengths = [] + if not isinstance(max_sequence_lengths, (list, tuple)): + raise TypeError( + "Expected list for 'max_sequence_lengths' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % max_sequence_lengths) + max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] + if num_features is None: + num_features = [] + if not isinstance(num_features, (list, tuple)): + raise TypeError( + "Expected list for 'num_features' argument to " + "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." 
% num_features) + num_features = [_execute.make_int(_i, "num_features") for _i in num_features] + _attr_T1, sample_indices = _execute.args_to_matching_eager(list(sample_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) + _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) + mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) + _inputs_flat = list(sample_indices) + list(embedding_indices) + list(aggregation_weights) + [mode_override] + _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, + "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", + table_ids, "max_sequence_lengths", max_sequence_lengths, "num_features", + num_features) + _result = _execute.execute(b"EnqueueTPUEmbeddingSparseTensorBatch", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_InfeedDequeue_dtype = TypeVar("TV_InfeedDequeue_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def infeed_dequeue(dtype: TV_InfeedDequeue_dtype, shape, name=None) -> Annotated[Any, TV_InfeedDequeue_dtype]: + r"""A placeholder op for a value that will be fed into the computation. + + Args: + dtype: A `tf.DType`. The type of elements in the tensor. + shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InfeedDequeue", name, "dtype", dtype, "shape", shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return infeed_dequeue_eager_fallback( + dtype=dtype, shape=shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InfeedDequeue", dtype=dtype, shape=shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", + _op.get_attr("shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "InfeedDequeue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +InfeedDequeue = tf_export("raw_ops.InfeedDequeue")(_ops.to_raw_op(infeed_dequeue)) + + +def infeed_dequeue_eager_fallback(dtype: TV_InfeedDequeue_dtype, shape, name, ctx) -> Annotated[Any, TV_InfeedDequeue_dtype]: + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + _inputs_flat = [] + _attrs = ("dtype", dtype, "shape", shape) + _result = _execute.execute(b"InfeedDequeue", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "InfeedDequeue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def infeed_dequeue_tuple(dtypes, shapes, name=None): + r"""Fetches multiple values from infeed as an XLA tuple. + + Args: + dtypes: A list of `tf.DTypes` that has length `>= 1`. 
+ The element types of each element in `outputs`. + shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of each tensor in `outputs`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `dtypes`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InfeedDequeueTuple", name, "dtypes", dtypes, "shapes", shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return infeed_dequeue_tuple_eager_fallback( + dtypes=dtypes, shapes=shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'infeed_dequeue_tuple' Op, not %r." % dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'infeed_dequeue_tuple' Op, not %r." 
% shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InfeedDequeueTuple", dtypes=dtypes, shapes=shapes, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", + _op.get_attr("shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "InfeedDequeueTuple", _inputs_flat, _attrs, _result) + return _result + +InfeedDequeueTuple = tf_export("raw_ops.InfeedDequeueTuple")(_ops.to_raw_op(infeed_dequeue_tuple)) + + +def infeed_dequeue_tuple_eager_fallback(dtypes, shapes, name, ctx): + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'infeed_dequeue_tuple' Op, not %r." % dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'infeed_dequeue_tuple' Op, not %r." 
% shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + _inputs_flat = [] + _attrs = ("dtypes", dtypes, "shapes", shapes) + _result = _execute.execute(b"InfeedDequeueTuple", len(dtypes), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "InfeedDequeueTuple", _inputs_flat, _attrs, _result) + return _result + + +TV_InfeedEnqueue_dtype = TypeVar("TV_InfeedEnqueue_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def infeed_enqueue(input: Annotated[Any, TV_InfeedEnqueue_dtype], shape=[], layout=[], device_ordinal:int=-1, name=None): + r"""An op which feeds a single Tensor value into the computation. + + Args: + input: A `Tensor`. + A tensor that will be provided using the infeed mechanism. + shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`. + The shape of the tensor. + layout: An optional list of `ints`. Defaults to `[]`. + A vector holding the requested layout in minor-to-major sequence. + If a layout attribute is passed, but its values are all -1, the layout will + be computed by the infeed operation. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. This should be -1 when the Op + is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InfeedEnqueue", name, input, "shape", shape, "layout", layout, + "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return infeed_enqueue_eager_fallback( + input, shape=shape, layout=layout, device_ordinal=device_ordinal, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if shape is None: + shape = [] + shape = _execute.make_shape(shape, "shape") + if layout is None: + layout = [] + if not isinstance(layout, (list, tuple)): + raise TypeError( + "Expected list for 'layout' argument to " + "'infeed_enqueue' Op, not %r." % layout) + layout = [_execute.make_int(_i, "layout") for _i in layout] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InfeedEnqueue", input=input, shape=shape, layout=layout, + device_ordinal=device_ordinal, name=name) + return _op +InfeedEnqueue = tf_export("raw_ops.InfeedEnqueue")(_ops.to_raw_op(infeed_enqueue)) + + +def infeed_enqueue_eager_fallback(input: Annotated[Any, TV_InfeedEnqueue_dtype], shape, layout, device_ordinal: int, name, ctx): + if shape is None: + shape = [] + shape = _execute.make_shape(shape, "shape") + if layout is None: + layout = [] + if not isinstance(layout, (list, tuple)): + raise TypeError( + "Expected list for 'layout' argument to " + "'infeed_enqueue' Op, not %r." 
% layout) + layout = [_execute.make_int(_i, "layout") for _i in layout] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout, + "device_ordinal", device_ordinal) + _result = _execute.execute(b"InfeedEnqueue", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def infeed_enqueue_prelinearized_buffer(input: Annotated[Any, _atypes.Variant], device_ordinal:int=-1, name=None): + r"""An op which enqueues prelinearized buffer into TPU infeed. + + Args: + input: A `Tensor` of type `variant`. + A variant tensor representing linearized output. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. This should be -1 when the Op is running on a TPU device + and = 0 when the Op is running on the CPU device. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InfeedEnqueuePrelinearizedBuffer", name, input, + "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return infeed_enqueue_prelinearized_buffer_eager_fallback( + input, device_ordinal=device_ordinal, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InfeedEnqueuePrelinearizedBuffer", input=input, + device_ordinal=device_ordinal, + name=name) + return _op +InfeedEnqueuePrelinearizedBuffer = tf_export("raw_ops.InfeedEnqueuePrelinearizedBuffer")(_ops.to_raw_op(infeed_enqueue_prelinearized_buffer)) + + +def infeed_enqueue_prelinearized_buffer_eager_fallback(input: Annotated[Any, _atypes.Variant], device_ordinal: int, name, ctx): + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + input = _ops.convert_to_tensor(input, _dtypes.variant) + _inputs_flat = [input] + _attrs = ("device_ordinal", device_ordinal) + _result = _execute.execute(b"InfeedEnqueuePrelinearizedBuffer", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def infeed_enqueue_tuple(inputs, shapes, layouts=[], device_ordinal:int=-1, name=None): + r"""Feeds multiple Tensor values into the computation as an XLA tuple. + + Args: + inputs: A list of `Tensor` objects. + A list of tensors that will be provided using the infeed mechanism. + shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of each tensor in `inputs`. + layouts: An optional list of `ints`. Defaults to `[]`. + A vector holding the requested layout in minor-to-major sequence for + all the tuple shapes, in the order the shapes appear in the "shapes" input. + The layout elements for a sub-shape can be set to -1, in which case the + corresponding layout will be computed by the infeed operation. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. This should be -1 when the Op + is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "InfeedEnqueueTuple", name, inputs, "shapes", shapes, "layouts", + layouts, "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return infeed_enqueue_tuple_eager_fallback( + inputs, shapes=shapes, layouts=layouts, + device_ordinal=device_ordinal, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'infeed_enqueue_tuple' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if layouts is None: + layouts = [] + if not isinstance(layouts, (list, tuple)): + raise TypeError( + "Expected list for 'layouts' argument to " + "'infeed_enqueue_tuple' Op, not %r." % layouts) + layouts = [_execute.make_int(_i, "layouts") for _i in layouts] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "InfeedEnqueueTuple", inputs=inputs, shapes=shapes, layouts=layouts, + device_ordinal=device_ordinal, name=name) + return _op +InfeedEnqueueTuple = tf_export("raw_ops.InfeedEnqueueTuple")(_ops.to_raw_op(infeed_enqueue_tuple)) + + +def infeed_enqueue_tuple_eager_fallback(inputs, shapes, layouts, device_ordinal: int, name, ctx): + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'infeed_enqueue_tuple' Op, not %r." 
% shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if layouts is None: + layouts = [] + if not isinstance(layouts, (list, tuple)): + raise TypeError( + "Expected list for 'layouts' argument to " + "'infeed_enqueue_tuple' Op, not %r." % layouts) + layouts = [_execute.make_int(_i, "layouts") for _i in layouts] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = list(inputs) + _attrs = ("dtypes", _attr_dtypes, "shapes", shapes, "layouts", layouts, + "device_ordinal", device_ordinal) + _result = _execute.execute(b"InfeedEnqueueTuple", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def is_tpu_embedding_initialized(config:str="", name=None) -> Annotated[Any, _atypes.Bool]: + r"""Whether TPU Embedding is initialized in a distributed TPU system. + + Args: + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsTPUEmbeddingInitialized", name, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return is_tpu_embedding_initialized_eager_fallback( + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsTPUEmbeddingInitialized", config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsTPUEmbeddingInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsTPUEmbeddingInitialized = tf_export("raw_ops.IsTPUEmbeddingInitialized")(_ops.to_raw_op(is_tpu_embedding_initialized)) + + +def is_tpu_embedding_initialized_eager_fallback(config: str, name, ctx) -> Annotated[Any, _atypes.Bool]: + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("config", config) + _result = _execute.execute(b"IsTPUEmbeddingInitialized", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsTPUEmbeddingInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def load_tpu_embedding_adam_parameters(parameters: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], velocities: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load ADAM embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the ADAM optimization algorithm. + momenta: A `Tensor` of type `float32`. + Value of momenta used in the ADAM optimization algorithm. + velocities: A `Tensor` of type `float32`. 
+ Value of velocities used in the ADAM optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingADAMParameters", name, parameters, momenta, + velocities, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_adam_parameters_eager_fallback( + parameters, momenta, velocities, table_id=table_id, + table_name=table_name, num_shards=num_shards, shard_id=shard_id, + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingADAMParameters", parameters=parameters, + momenta=momenta, + velocities=velocities, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingADAMParameters = tf_export("raw_ops.LoadTPUEmbeddingADAMParameters")(_ops.to_raw_op(load_tpu_embedding_adam_parameters)) + + +def load_tpu_embedding_adam_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], velocities: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) + velocities = _ops.convert_to_tensor(velocities, _dtypes.float32) + _inputs_flat = [parameters, momenta, velocities] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingADAMParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = 
None + return _result + + +def load_tpu_embedding_adadelta_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], updates: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load Adadelta embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the Adadelta optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the Adadelta optimization algorithm. + updates: A `Tensor` of type `float32`. + Value of updates used in the Adadelta optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingAdadeltaParameters", name, parameters, + accumulators, updates, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_adadelta_parameters_eager_fallback( + parameters, accumulators, updates, table_id=table_id, + table_name=table_name, num_shards=num_shards, shard_id=shard_id, + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingAdadeltaParameters", parameters=parameters, + accumulators=accumulators, + updates=updates, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingAdadeltaParameters = tf_export("raw_ops.LoadTPUEmbeddingAdadeltaParameters")(_ops.to_raw_op(load_tpu_embedding_adadelta_parameters)) + + +def load_tpu_embedding_adadelta_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], updates: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = 
_execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + updates = _ops.convert_to_tensor(updates, _dtypes.float32) + _inputs_flat = [parameters, accumulators, updates] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingAdadeltaParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_adagrad_momentum_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load Adagrad Momentum embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the Adagrad Momentum optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the Adagrad Momentum optimization algorithm. + momenta: A `Tensor` of type `float32`. + Value of momenta used in the Adagrad Momentum optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. 
Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingAdagradMomentumParameters", name, parameters, + accumulators, momenta, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_adagrad_momentum_parameters_eager_fallback( + parameters, accumulators, momenta, table_id=table_id, + table_name=table_name, num_shards=num_shards, shard_id=shard_id, + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingAdagradMomentumParameters", parameters=parameters, + accumulators=accumulators, + momenta=momenta, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingAdagradMomentumParameters = tf_export("raw_ops.LoadTPUEmbeddingAdagradMomentumParameters")(_ops.to_raw_op(load_tpu_embedding_adagrad_momentum_parameters)) + + +def load_tpu_embedding_adagrad_momentum_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) + _inputs_flat = [parameters, accumulators, momenta] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = 
_execute.execute(b"LoadTPUEmbeddingAdagradMomentumParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_adagrad_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load Adagrad embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the Adagrad optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the Adagrad optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingAdagradParameters", name, parameters, + accumulators, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_adagrad_parameters_eager_fallback( + parameters, accumulators, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingAdagradParameters", parameters=parameters, + accumulators=accumulators, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingAdagradParameters = tf_export("raw_ops.LoadTPUEmbeddingAdagradParameters")(_ops.to_raw_op(load_tpu_embedding_adagrad_parameters)) + + +def load_tpu_embedding_adagrad_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, 
"shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + _inputs_flat = [parameters, accumulators] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingAdagradParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_centered_rms_prop_parameters(parameters: Annotated[Any, _atypes.Float32], ms: Annotated[Any, _atypes.Float32], mom: Annotated[Any, _atypes.Float32], mg: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load centered RMSProp embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the centered RMSProp optimization algorithm. + ms: A `Tensor` of type `float32`. + Value of ms used in the centered RMSProp optimization algorithm. + mom: A `Tensor` of type `float32`. + Value of mom used in the centered RMSProp optimization algorithm. + mg: A `Tensor` of type `float32`. + Value of mg used in the centered RMSProp optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. 
Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingCenteredRMSPropParameters", name, parameters, + ms, mom, mg, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_centered_rms_prop_parameters_eager_fallback( + parameters, ms, mom, mg, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingCenteredRMSPropParameters", parameters=parameters, + ms=ms, mom=mom, mg=mg, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingCenteredRMSPropParameters = tf_export("raw_ops.LoadTPUEmbeddingCenteredRMSPropParameters")(_ops.to_raw_op(load_tpu_embedding_centered_rms_prop_parameters)) + + +def load_tpu_embedding_centered_rms_prop_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], ms: Annotated[Any, _atypes.Float32], mom: Annotated[Any, _atypes.Float32], mg: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + ms = _ops.convert_to_tensor(ms, _dtypes.float32) + mom = _ops.convert_to_tensor(mom, _dtypes.float32) + mg = _ops.convert_to_tensor(mg, _dtypes.float32) + _inputs_flat = [parameters, ms, mom, mg] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = 
_execute.execute(b"LoadTPUEmbeddingCenteredRMSPropParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_ftrl_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], linears: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load FTRL embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the FTRL optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the FTRL optimization algorithm. + linears: A `Tensor` of type `float32`. + Value of linears used in the FTRL optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingFTRLParameters", name, parameters, + accumulators, linears, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_ftrl_parameters_eager_fallback( + parameters, accumulators, linears, table_id=table_id, + table_name=table_name, num_shards=num_shards, shard_id=shard_id, + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingFTRLParameters", parameters=parameters, + accumulators=accumulators, + linears=linears, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingFTRLParameters = tf_export("raw_ops.LoadTPUEmbeddingFTRLParameters")(_ops.to_raw_op(load_tpu_embedding_ftrl_parameters)) + + +def load_tpu_embedding_ftrl_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], linears: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = 
_execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + linears = _ops.convert_to_tensor(linears, _dtypes.float32) + _inputs_flat = [parameters, accumulators, linears] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingFTRLParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_frequency_estimator_parameters(parameters: Annotated[Any, _atypes.Float32], last_hit_step: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load frequency estimator embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the frequency estimator optimization algorithm. + last_hit_step: A `Tensor` of type `float32`. + Value of last_hit_step used in the frequency estimator optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. 
+ name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingFrequencyEstimatorParameters", name, + parameters, last_hit_step, "table_id", table_id, "table_name", + table_name, "num_shards", num_shards, "shard_id", shard_id, "config", + config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_frequency_estimator_parameters_eager_fallback( + parameters, last_hit_step, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingFrequencyEstimatorParameters", parameters=parameters, + last_hit_step=last_hit_step, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + return _op +LoadTPUEmbeddingFrequencyEstimatorParameters = tf_export("raw_ops.LoadTPUEmbeddingFrequencyEstimatorParameters")(_ops.to_raw_op(load_tpu_embedding_frequency_estimator_parameters)) + + +def load_tpu_embedding_frequency_estimator_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], last_hit_step: Annotated[Any, _atypes.Float32], num_shards: int, 
shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + last_hit_step = _ops.convert_to_tensor(last_hit_step, _dtypes.float32) + _inputs_flat = [parameters, last_hit_step] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingFrequencyEstimatorParameters", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_mdl_adagrad_light_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], weights: Annotated[Any, _atypes.Float32], benefits: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load MDL Adagrad Light embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the MDL Adagrad Light optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the MDL Adagrad Light optimization algorithm. + weights: A `Tensor` of type `float32`. + Value of weights used in the MDL Adagrad Light optimization algorithm. 
+ benefits: A `Tensor` of type `float32`. + Value of benefits used in the MDL Adagrad Light optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingMDLAdagradLightParameters", name, parameters, + accumulators, weights, benefits, "table_id", table_id, "table_name", + table_name, "num_shards", num_shards, "shard_id", shard_id, "config", + config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback( + parameters, accumulators, weights, benefits, table_id=table_id, + table_name=table_name, num_shards=num_shards, shard_id=shard_id, + config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingMDLAdagradLightParameters", parameters=parameters, + accumulators=accumulators, + weights=weights, + benefits=benefits, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingMDLAdagradLightParameters = tf_export("raw_ops.LoadTPUEmbeddingMDLAdagradLightParameters")(_ops.to_raw_op(load_tpu_embedding_mdl_adagrad_light_parameters)) + + +def load_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], weights: Annotated[Any, _atypes.Float32], benefits: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + weights = _ops.convert_to_tensor(weights, _dtypes.float32) + benefits = _ops.convert_to_tensor(benefits, _dtypes.float32) + _inputs_flat = [parameters, accumulators, weights, benefits] + _attrs = ("table_id", table_id, "table_name", 
table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingMDLAdagradLightParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_momentum_parameters(parameters: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load Momentum embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the Momentum optimization algorithm. + momenta: A `Tensor` of type `float32`. + Value of momenta used in the Momentum optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingMomentumParameters", name, parameters, momenta, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_momentum_parameters_eager_fallback( + parameters, momenta, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingMomentumParameters", parameters=parameters, + momenta=momenta, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingMomentumParameters = tf_export("raw_ops.LoadTPUEmbeddingMomentumParameters")(_ops.to_raw_op(load_tpu_embedding_momentum_parameters)) + + +def load_tpu_embedding_momentum_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], momenta: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if 
table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) + _inputs_flat = [parameters, momenta] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingMomentumParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_proximal_adagrad_parameters(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load proximal Adagrad embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the proximal Adagrad optimization algorithm. + accumulators: A `Tensor` of type `float32`. + Value of accumulators used in the proximal Adagrad optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingProximalAdagradParameters", name, parameters, + accumulators, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_proximal_adagrad_parameters_eager_fallback( + parameters, accumulators, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingProximalAdagradParameters", parameters=parameters, + accumulators=accumulators, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingProximalAdagradParameters = tf_export("raw_ops.LoadTPUEmbeddingProximalAdagradParameters")(_ops.to_raw_op(load_tpu_embedding_proximal_adagrad_parameters)) + + +def load_tpu_embedding_proximal_adagrad_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], accumulators: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = 
_execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) + _inputs_flat = [parameters, accumulators] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingProximalAdagradParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_proximal_yogi_parameters(parameters: Annotated[Any, _atypes.Float32], v: Annotated[Any, _atypes.Float32], m: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""TODO: add doc. + + Args: + parameters: A `Tensor` of type `float32`. + v: A `Tensor` of type `float32`. + m: A `Tensor` of type `float32`. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingProximalYogiParameters", name, parameters, v, + m, "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_proximal_yogi_parameters_eager_fallback( + parameters, v, m, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingProximalYogiParameters", parameters=parameters, v=v, + m=m, num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingProximalYogiParameters = tf_export("raw_ops.LoadTPUEmbeddingProximalYogiParameters")(_ops.to_raw_op(load_tpu_embedding_proximal_yogi_parameters)) + + +def load_tpu_embedding_proximal_yogi_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], v: Annotated[Any, _atypes.Float32], m: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = 
_execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + v = _ops.convert_to_tensor(v, _dtypes.float32) + m = _ops.convert_to_tensor(m, _dtypes.float32) + _inputs_flat = [parameters, v, m] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingProximalYogiParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_rms_prop_parameters(parameters: Annotated[Any, _atypes.Float32], ms: Annotated[Any, _atypes.Float32], mom: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load RMSProp embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the RMSProp optimization algorithm. + ms: A `Tensor` of type `float32`. + Value of ms used in the RMSProp optimization algorithm. + mom: A `Tensor` of type `float32`. + Value of mom used in the RMSProp optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). 
+ + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingRMSPropParameters", name, parameters, ms, mom, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_rms_prop_parameters_eager_fallback( + parameters, ms, mom, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingRMSPropParameters", parameters=parameters, ms=ms, + mom=mom, num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + return _op +LoadTPUEmbeddingRMSPropParameters = tf_export("raw_ops.LoadTPUEmbeddingRMSPropParameters")(_ops.to_raw_op(load_tpu_embedding_rms_prop_parameters)) + + +def load_tpu_embedding_rms_prop_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], ms: Annotated[Any, _atypes.Float32], mom: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, 
"num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + ms = _ops.convert_to_tensor(ms, _dtypes.float32) + mom = _ops.convert_to_tensor(mom, _dtypes.float32) + _inputs_flat = [parameters, ms, mom] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingRMSPropParameters", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def load_tpu_embedding_stochastic_gradient_descent_parameters(parameters: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Load SGD embedding parameters. + + An op that loads optimization parameters into HBM for embedding. Must be + preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + embedding table configuration. For example, this op is used to install + parameters that are loaded from a checkpoint before a training loop is + executed. + + Args: + parameters: A `Tensor` of type `float32`. + Value of parameters used in the stochastic gradient descent optimization algorithm. + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "LoadTPUEmbeddingStochasticGradientDescentParameters", name, + parameters, "table_id", table_id, "table_name", table_name, + "num_shards", num_shards, "shard_id", shard_id, "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return load_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback( + parameters, table_id=table_id, table_name=table_name, + num_shards=num_shards, shard_id=shard_id, config=config, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "LoadTPUEmbeddingStochasticGradientDescentParameters", parameters=parameters, + num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + return _op +LoadTPUEmbeddingStochasticGradientDescentParameters = tf_export("raw_ops.LoadTPUEmbeddingStochasticGradientDescentParameters")(_ops.to_raw_op(load_tpu_embedding_stochastic_gradient_descent_parameters)) + + +def load_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback(parameters: Annotated[Any, _atypes.Float32], num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = 
_execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) + _inputs_flat = [parameters] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"LoadTPUEmbeddingStochasticGradientDescentParameters", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +TV_OutfeedDequeue_dtype = TypeVar("TV_OutfeedDequeue_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def outfeed_dequeue(dtype: TV_OutfeedDequeue_dtype, shape, device_ordinal:int=-1, name=None) -> Annotated[Any, TV_OutfeedDequeue_dtype]: + r"""Retrieves a single tensor from the computation outfeed. + + This operation will block indefinitely until data is available. + + Args: + dtype: A `tf.DType`. The type of elements in the tensor. + shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. This should be -1 when the Op + is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedDequeue", name, "dtype", dtype, "shape", shape, + "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_dequeue_eager_fallback( + dtype=dtype, shape=shape, device_ordinal=device_ordinal, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedDequeue", dtype=dtype, shape=shape, + device_ordinal=device_ordinal, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", + _op.get_attr("shape"), "device_ordinal", + _op._get_attr_int("device_ordinal")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "OutfeedDequeue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +OutfeedDequeue = tf_export("raw_ops.OutfeedDequeue")(_ops.to_raw_op(outfeed_dequeue)) + + +def outfeed_dequeue_eager_fallback(dtype: TV_OutfeedDequeue_dtype, shape, device_ordinal: int, name, ctx) -> Annotated[Any, TV_OutfeedDequeue_dtype]: + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _inputs_flat = [] + _attrs = ("dtype", dtype, "shape", shape, "device_ordinal", device_ordinal) + _result = _execute.execute(b"OutfeedDequeue", 1, inputs=_inputs_flat, + 
attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "OutfeedDequeue", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def outfeed_dequeue_tuple(dtypes, shapes, device_ordinal:int=-1, name=None): + r"""Retrieve multiple values from the computation outfeed. + + This operation will block indefinitely until data is available. Output `i` + corresponds to XLA tuple element `i`. + + Args: + dtypes: A list of `tf.DTypes` that has length `>= 1`. + The element types of each element in `outputs`. + shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of each tensor in `outputs`. + device_ordinal: An optional `int`. Defaults to `-1`. + The TPU device to use. This should be -1 when the Op + is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `dtypes`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedDequeueTuple", name, "dtypes", dtypes, "shapes", shapes, + "device_ordinal", device_ordinal) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_dequeue_tuple_eager_fallback( + dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'outfeed_dequeue_tuple' Op, not %r." 
% dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'outfeed_dequeue_tuple' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedDequeueTuple", dtypes=dtypes, shapes=shapes, + device_ordinal=device_ordinal, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", + _op.get_attr("shapes"), "device_ordinal", + _op._get_attr_int("device_ordinal")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "OutfeedDequeueTuple", _inputs_flat, _attrs, _result) + return _result + +OutfeedDequeueTuple = tf_export("raw_ops.OutfeedDequeueTuple")(_ops.to_raw_op(outfeed_dequeue_tuple)) + + +def outfeed_dequeue_tuple_eager_fallback(dtypes, shapes, device_ordinal: int, name, ctx): + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'outfeed_dequeue_tuple' Op, not %r." % dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'outfeed_dequeue_tuple' Op, not %r." 
% shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if device_ordinal is None: + device_ordinal = -1 + device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") + _inputs_flat = [] + _attrs = ("dtypes", dtypes, "shapes", shapes, "device_ordinal", + device_ordinal) + _result = _execute.execute(b"OutfeedDequeueTuple", len(dtypes), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "OutfeedDequeueTuple", _inputs_flat, _attrs, _result) + return _result + + +def outfeed_dequeue_tuple_v2(device_ordinal: Annotated[Any, _atypes.Int32], dtypes, shapes, name=None): + r"""Retrieve multiple values from the computation outfeed. Device ordinal is a +tensor allowing dynamic outfeed. + + This operation will block indefinitely until data is available. Output `i` + corresponds to XLA tuple element `i`. + + Args: + device_ordinal: A `Tensor` of type `int32`. + An int scalar tensor, representing the TPU device to use. This should be -1 when + the Op is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + dtypes: A list of `tf.DTypes` that has length `>= 1`. + The element types of each element in `outputs`. + shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of each tensor in `outputs`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `dtypes`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedDequeueTupleV2", name, device_ordinal, "dtypes", dtypes, + "shapes", shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_dequeue_tuple_v2_eager_fallback( + device_ordinal, dtypes=dtypes, shapes=shapes, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'outfeed_dequeue_tuple_v2' Op, not %r." % dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'outfeed_dequeue_tuple_v2' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedDequeueTupleV2", device_ordinal=device_ordinal, dtypes=dtypes, + shapes=shapes, name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", + _op.get_attr("shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "OutfeedDequeueTupleV2", _inputs_flat, _attrs, _result) + return _result + +OutfeedDequeueTupleV2 = tf_export("raw_ops.OutfeedDequeueTupleV2")(_ops.to_raw_op(outfeed_dequeue_tuple_v2)) + + +def outfeed_dequeue_tuple_v2_eager_fallback(device_ordinal: Annotated[Any, _atypes.Int32], dtypes, shapes, name, ctx): + if not isinstance(dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'dtypes' argument to " + "'outfeed_dequeue_tuple_v2' Op, not %r." 
% dtypes) + dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'outfeed_dequeue_tuple_v2' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) + _inputs_flat = [device_ordinal] + _attrs = ("dtypes", dtypes, "shapes", shapes) + _result = _execute.execute(b"OutfeedDequeueTupleV2", len(dtypes), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "OutfeedDequeueTupleV2", _inputs_flat, _attrs, _result) + return _result + + +TV_OutfeedDequeueV2_dtype = TypeVar("TV_OutfeedDequeueV2_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def outfeed_dequeue_v2(device_ordinal: Annotated[Any, _atypes.Int32], dtype: TV_OutfeedDequeueV2_dtype, shape, name=None) -> Annotated[Any, TV_OutfeedDequeueV2_dtype]: + r"""Retrieves a single tensor from the computation outfeed. Device ordinal is a +tensor allowing dynamic outfeed. + + This operation will block indefinitely until data is available. + + Args: + device_ordinal: A `Tensor` of type `int32`. + An int scalar tensor, representing the TPU device to use. This should be -1 when + the Op is running on a TPU device, and >= 0 when the Op is running on the CPU + device. + dtype: A `tf.DType`. The type of elements in the tensor. + shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedDequeueV2", name, device_ordinal, "dtype", dtype, + "shape", shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_dequeue_v2_eager_fallback( + device_ordinal, dtype=dtype, shape=shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedDequeueV2", device_ordinal=device_ordinal, dtype=dtype, + shape=shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", + _op.get_attr("shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "OutfeedDequeueV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +OutfeedDequeueV2 = tf_export("raw_ops.OutfeedDequeueV2")(_ops.to_raw_op(outfeed_dequeue_v2)) + + +def outfeed_dequeue_v2_eager_fallback(device_ordinal: Annotated[Any, _atypes.Int32], dtype: TV_OutfeedDequeueV2_dtype, shape, name, ctx) -> Annotated[Any, TV_OutfeedDequeueV2_dtype]: + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) + _inputs_flat = [device_ordinal] + _attrs = ("dtype", dtype, "shape", shape) + _result = _execute.execute(b"OutfeedDequeueV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "OutfeedDequeueV2", _inputs_flat, _attrs, _result) + 
_result, = _result + return _result + + +TV_OutfeedEnqueue_dtype = TypeVar("TV_OutfeedEnqueue_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def outfeed_enqueue(input: Annotated[Any, TV_OutfeedEnqueue_dtype], name=None): + r"""Enqueue a Tensor on the computation outfeed. + + Args: + input: A `Tensor`. A tensor that will be inserted into the outfeed queue. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedEnqueue", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_enqueue_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedEnqueue", input=input, name=name) + return _op +OutfeedEnqueue = tf_export("raw_ops.OutfeedEnqueue")(_ops.to_raw_op(outfeed_enqueue)) + + +def outfeed_enqueue_eager_fallback(input: Annotated[Any, TV_OutfeedEnqueue_dtype], name, ctx): + _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("dtype", _attr_dtype) + _result = _execute.execute(b"OutfeedEnqueue", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +def outfeed_enqueue_tuple(inputs, name=None): + r"""Enqueue multiple Tensor values on the computation outfeed. + + Args: + inputs: A list of `Tensor` objects. + A list of tensors that will be inserted into the outfeed queue as an + XLA tuple. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "OutfeedEnqueueTuple", name, inputs) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return outfeed_enqueue_tuple_eager_fallback( + inputs, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "OutfeedEnqueueTuple", inputs=inputs, name=name) + return _op +OutfeedEnqueueTuple = tf_export("raw_ops.OutfeedEnqueueTuple")(_ops.to_raw_op(outfeed_enqueue_tuple)) + + +def outfeed_enqueue_tuple_eager_fallback(inputs, name, ctx): + _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = list(inputs) + _attrs = ("dtypes", _attr_dtypes) + _result = _execute.execute(b"OutfeedEnqueueTuple", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +TV_Prelinearize_dtype = TypeVar("TV_Prelinearize_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def prelinearize(input: Annotated[Any, TV_Prelinearize_dtype], shape=[], layout=[], name=None) -> Annotated[Any, _atypes.Variant]: + r"""An op which linearizes one Tensor value to an opaque variant tensor. + + Args: + input: A `Tensor`. A tensor that will be linearized. + shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`. + The shape of the tensor. + layout: An optional list of `ints`. Defaults to `[]`. + A vector holding the requested layout in minor-to-major sequence. If a layout + attribute is passed but its values are all -1 the layout will be computed by + the infeed operation. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "Prelinearize", name, input, "shape", shape, "layout", layout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return prelinearize_eager_fallback( + input, shape=shape, layout=layout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if shape is None: + shape = [] + shape = _execute.make_shape(shape, "shape") + if layout is None: + layout = [] + if not isinstance(layout, (list, tuple)): + raise TypeError( + "Expected list for 'layout' argument to " + "'prelinearize' Op, not %r." % layout) + layout = [_execute.make_int(_i, "layout") for _i in layout] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "Prelinearize", input=input, shape=shape, layout=layout, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", + _op.get_attr("shape"), "layout", _op.get_attr("layout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "Prelinearize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +Prelinearize = tf_export("raw_ops.Prelinearize")(_ops.to_raw_op(prelinearize)) + + +def prelinearize_eager_fallback(input: Annotated[Any, TV_Prelinearize_dtype], shape, layout, name, ctx) -> Annotated[Any, _atypes.Variant]: + if shape is None: + shape = [] + shape = _execute.make_shape(shape, "shape") + if layout is None: + layout = [] + if not isinstance(layout, (list, tuple)): + raise TypeError( + "Expected list for 'layout' argument to " + "'prelinearize' Op, not %r." 
% layout) + layout = [_execute.make_int(_i, "layout") for _i in layout] + _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout) + _result = _execute.execute(b"Prelinearize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "Prelinearize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def prelinearize_tuple(inputs, shapes, layouts=[], name=None) -> Annotated[Any, _atypes.Variant]: + r"""An op which linearizes multiple Tensor values to an opaque variant tensor. + + Args: + inputs: A list of `Tensor` objects. + A list of tensors that will be provided using the infeed mechanism. + shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + The shapes of each tensor in `inputs`. + layouts: An optional list of `ints`. Defaults to `[]`. + A vector holding the requested layout in minor-to-major sequence for all the + tuple shapes in the order the shapes appear in the "shapes" input. The layout + elements for a sub-shape can be set to -1 in which case the corresponding layout + will be computed by the infeed operation. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "PrelinearizeTuple", name, inputs, "shapes", shapes, "layouts", + layouts) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return prelinearize_tuple_eager_fallback( + inputs, shapes=shapes, layouts=layouts, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'prelinearize_tuple' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if layouts is None: + layouts = [] + if not isinstance(layouts, (list, tuple)): + raise TypeError( + "Expected list for 'layouts' argument to " + "'prelinearize_tuple' Op, not %r." % layouts) + layouts = [_execute.make_int(_i, "layouts") for _i in layouts] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "PrelinearizeTuple", inputs=inputs, shapes=shapes, layouts=layouts, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", + _op.get_attr("shapes"), "layouts", _op.get_attr("layouts")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "PrelinearizeTuple", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +PrelinearizeTuple = tf_export("raw_ops.PrelinearizeTuple")(_ops.to_raw_op(prelinearize_tuple)) + + +def prelinearize_tuple_eager_fallback(inputs, shapes, layouts, name, ctx) -> Annotated[Any, _atypes.Variant]: + if not isinstance(shapes, (list, tuple)): + raise TypeError( + "Expected list for 'shapes' argument to " + "'prelinearize_tuple' Op, not %r." % shapes) + shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] + if layouts is None: + layouts = [] + if not isinstance(layouts, (list, tuple)): + raise TypeError( + "Expected list for 'layouts' argument to " + "'prelinearize_tuple' Op, not %r." 
% layouts) + layouts = [_execute.make_int(_i, "layouts") for _i in layouts] + _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = list(inputs) + _attrs = ("dtypes", _attr_dtypes, "shapes", shapes, "layouts", layouts) + _result = _execute.execute(b"PrelinearizeTuple", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "PrelinearizeTuple", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_ReadVariableXlaSplitND_T = TypeVar("TV_ReadVariableXlaSplitND_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def read_variable_xla_split_nd(resource: Annotated[Any, _atypes.Resource], T: TV_ReadVariableXlaSplitND_T, N: int, num_splits, paddings=[], name=None): + r"""Splits resource variable input tensor across all dimensions. + + An op which splits the resource variable input tensor based on the given + num_splits attribute, pads slices optionally, and returned the slices. Slices + are returned in row-major order. + + This op may be generated via the TPU bridge. + + For example, with `input` tensor: + ``` + [[0, 1, 2], + [3, 4, 5], + [6, 7, 8]] + ``` + `num_splits`: + ``` + [2, 2] + ``` + and `paddings`: + ``` + [1, 1] + ``` + the expected `outputs` is: + ``` + [[0, 1], + [3, 4]] + [[2, 0], + [5, 0]] + [[6, 7], + [0, 0]] + [[8, 0], + [0, 0]] + ``` + + Args: + resource: A `Tensor` of type `resource`. + Resource variable of input tensor to split across all dimensions. + } + out_arg { + name: "outputs" + description: <= 1`. 
+ num_splits: A list of `ints`. + Number of ways to split per dimension. Shape dimensions must be evenly + divisible. + paddings: An optional list of `ints`. Defaults to `[]`. + Optional list of right paddings per dimension of input tensor to apply before + splitting. This can be used to make a dimension evenly divisible. + name: A name for the operation (optional). + + Returns: + A list of `N` `Tensor` objects with type `T`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "ReadVariableXlaSplitND", name, resource, "T", T, "N", N, + "num_splits", num_splits, "paddings", paddings) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return read_variable_xla_split_nd_eager_fallback( + resource, T=T, N=N, num_splits=num_splits, paddings=paddings, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + T = _execute.make_type(T, "T") + N = _execute.make_int(N, "N") + if not isinstance(num_splits, (list, tuple)): + raise TypeError( + "Expected list for 'num_splits' argument to " + "'read_variable_xla_split_nd' Op, not %r." % num_splits) + num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] + if paddings is None: + paddings = [] + if not isinstance(paddings, (list, tuple)): + raise TypeError( + "Expected list for 'paddings' argument to " + "'read_variable_xla_split_nd' Op, not %r." 
% paddings) + paddings = [_execute.make_int(_i, "paddings") for _i in paddings] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "ReadVariableXlaSplitND", resource=resource, T=T, N=N, + num_splits=num_splits, paddings=paddings, + name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "N", _op._get_attr_int("N"), + "num_splits", _op.get_attr("num_splits"), "paddings", + _op.get_attr("paddings")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "ReadVariableXlaSplitND", _inputs_flat, _attrs, _result) + return _result + +ReadVariableXlaSplitND = tf_export("raw_ops.ReadVariableXlaSplitND")(_ops.to_raw_op(read_variable_xla_split_nd)) + + +def read_variable_xla_split_nd_eager_fallback(resource: Annotated[Any, _atypes.Resource], T: TV_ReadVariableXlaSplitND_T, N: int, num_splits, paddings, name, ctx): + T = _execute.make_type(T, "T") + N = _execute.make_int(N, "N") + if not isinstance(num_splits, (list, tuple)): + raise TypeError( + "Expected list for 'num_splits' argument to " + "'read_variable_xla_split_nd' Op, not %r." % num_splits) + num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] + if paddings is None: + paddings = [] + if not isinstance(paddings, (list, tuple)): + raise TypeError( + "Expected list for 'paddings' argument to " + "'read_variable_xla_split_nd' Op, not %r." 
% paddings) + paddings = [_execute.make_int(_i, "paddings") for _i in paddings] + resource = _ops.convert_to_tensor(resource, _dtypes.resource) + _inputs_flat = [resource] + _attrs = ("T", T, "N", N, "num_splits", num_splits, "paddings", paddings) + _result = _execute.execute(b"ReadVariableXlaSplitND", N, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "ReadVariableXlaSplitND", _inputs_flat, _attrs, _result) + return _result + + +def recv_tpu_embedding_activations(num_outputs: int, config: str, name=None): + r"""An op that receives embedding activations on the TPU. + + The TPU system performs the embedding lookups and aggregations specified by + the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The + results of these aggregations are visible to the Tensorflow Graph as the + outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing + one Tensor of activations per table specified in the model. There can be at + most one RecvTPUEmbeddingActivations op in the TPU graph. + + Args: + num_outputs: An `int` that is `>= 1`. + The number of output activation tensors, equal to the number of + embedding tables in the model. + config: A `string`. Serialized TPUEmbeddingConfiguration proto. + name: A name for the operation (optional). + + Returns: + A list of `num_outputs` `Tensor` objects with type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RecvTPUEmbeddingActivations", name, "num_outputs", num_outputs, + "config", config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return recv_tpu_embedding_activations_eager_fallback( + num_outputs=num_outputs, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_outputs = _execute.make_int(num_outputs, "num_outputs") + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RecvTPUEmbeddingActivations", num_outputs=num_outputs, config=config, + name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("num_outputs", _op._get_attr_int("num_outputs"), "config", + _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RecvTPUEmbeddingActivations", _inputs_flat, _attrs, _result) + return _result + +RecvTPUEmbeddingActivations = tf_export("raw_ops.RecvTPUEmbeddingActivations")(_ops.to_raw_op(recv_tpu_embedding_activations)) + + +def recv_tpu_embedding_activations_eager_fallback(num_outputs: int, config: str, name, ctx): + num_outputs = _execute.make_int(num_outputs, "num_outputs") + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("num_outputs", num_outputs, "config", config) + _result = _execute.execute(b"RecvTPUEmbeddingActivations", num_outputs, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RecvTPUEmbeddingActivations", _inputs_flat, _attrs, _result) + return _result + +_RetrieveTPUEmbeddingADAMParametersOutput = collections.namedtuple( + 
"RetrieveTPUEmbeddingADAMParameters", + ["parameters", "momenta", "velocities"]) + + +def retrieve_tpu_embedding_adam_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve ADAM embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, momenta, velocities). + + parameters: A `Tensor` of type `float32`. + momenta: A `Tensor` of type `float32`. + velocities: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingADAMParameters", name, "table_id", + table_id, "table_name", table_name, "num_shards", num_shards, + "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_adam_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingADAMParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingADAMParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingADAMParameters = tf_export("raw_ops.RetrieveTPUEmbeddingADAMParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adam_parameters)) + + +def retrieve_tpu_embedding_adam_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = 
_execute.execute(b"RetrieveTPUEmbeddingADAMParameters", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingADAMParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingAdadeltaParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingAdadeltaParameters", + ["parameters", "accumulators", "updates"]) + + +def retrieve_tpu_embedding_adadelta_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve Adadelta embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators, updates). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. + updates: A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingAdadeltaParameters", name, "table_id", + table_id, "table_name", table_name, "num_shards", num_shards, + "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_adadelta_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingAdadeltaParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingAdadeltaParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) + return _result + 
+RetrieveTPUEmbeddingAdadeltaParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdadeltaParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adadelta_parameters)) + + +def retrieve_tpu_embedding_adadelta_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingAdadeltaParameters", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingAdadeltaParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingAdagradMomentumParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingAdagradMomentumParameters", + ["parameters", "accumulators", "momenta"]) + + +def retrieve_tpu_embedding_adagrad_momentum_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve Adagrad Momentum embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. 
+ table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators, momenta). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. + momenta: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingAdagradMomentumParameters", name, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_adagrad_momentum_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingAdagradMomentumParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingAdagradMomentumParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingAdagradMomentumParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdagradMomentumParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adagrad_momentum_parameters)) + + +def retrieve_tpu_embedding_adagrad_momentum_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", 
shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingAdagradMomentumParameters", + 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingAdagradMomentumParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingAdagradParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingAdagradParameters", + ["parameters", "accumulators"]) + + +def retrieve_tpu_embedding_adagrad_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve Adagrad embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingAdagradParameters", name, "table_id", + table_id, "table_name", table_name, "num_shards", num_shards, + "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_adagrad_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingAdagradParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingAdagradParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) + return _result + 
+RetrieveTPUEmbeddingAdagradParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdagradParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adagrad_parameters)) + + +def retrieve_tpu_embedding_adagrad_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingAdagradParameters", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingAdagradParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingCenteredRMSPropParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingCenteredRMSPropParameters", + ["parameters", "ms", "mom", "mg"]) + + +def retrieve_tpu_embedding_centered_rms_prop_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve centered RMSProp embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. 
+ table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, ms, mom, mg). + + parameters: A `Tensor` of type `float32`. + ms: A `Tensor` of type `float32`. + mom: A `Tensor` of type `float32`. + mg: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingCenteredRMSPropParameters", name, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingCenteredRMSPropParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingCenteredRMSPropParameters = tf_export("raw_ops.RetrieveTPUEmbeddingCenteredRMSPropParameters")(_ops.to_raw_op(retrieve_tpu_embedding_centered_rms_prop_parameters)) + + +def retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, 
"shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingCenteredRMSPropParameters", + 4, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingFTRLParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingFTRLParameters", + ["parameters", "accumulators", "linears"]) + + +def retrieve_tpu_embedding_ftrl_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve FTRL embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators, linears). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. + linears: A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingFTRLParameters", name, "table_id", + table_id, "table_name", table_name, "num_shards", num_shards, + "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_ftrl_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingFTRLParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingFTRLParameters 
= tf_export("raw_ops.RetrieveTPUEmbeddingFTRLParameters")(_ops.to_raw_op(retrieve_tpu_embedding_ftrl_parameters)) + + +def retrieve_tpu_embedding_ftrl_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingFTRLParameters", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingFrequencyEstimatorParameters", + ["parameters", "last_hit_step"]) + + +def retrieve_tpu_embedding_frequency_estimator_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve frequency estimator embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. 
+ config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, last_hit_step). + + parameters: A `Tensor` of type `float32`. + last_hit_step: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingFrequencyEstimatorParameters", name, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_frequency_estimator_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingFrequencyEstimatorParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingFrequencyEstimatorParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingFrequencyEstimatorParameters = tf_export("raw_ops.RetrieveTPUEmbeddingFrequencyEstimatorParameters")(_ops.to_raw_op(retrieve_tpu_embedding_frequency_estimator_parameters)) + + +def retrieve_tpu_embedding_frequency_estimator_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + 
num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingFrequencyEstimatorParameters", + 2, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingFrequencyEstimatorParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingMDLAdagradLightParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingMDLAdagradLightParameters", + ["parameters", "accumulators", "weights", "benefits"]) + + +def retrieve_tpu_embedding_mdl_adagrad_light_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve MDL Adagrad Light embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators, weights, benefits). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. + weights: A `Tensor` of type `float32`. + benefits: A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingMDLAdagradLightParameters", name, + "table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingMDLAdagradLightParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingMDLAdagradLightParameters", _inputs_flat, _attrs, _result) + _result = 
_RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingMDLAdagradLightParameters = tf_export("raw_ops.RetrieveTPUEmbeddingMDLAdagradLightParameters")(_ops.to_raw_op(retrieve_tpu_embedding_mdl_adagrad_light_parameters)) + + +def retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = _execute.execute(b"RetrieveTPUEmbeddingMDLAdagradLightParameters", + 4, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingMDLAdagradLightParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingMomentumParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingMomentumParameters", + ["parameters", "momenta"]) + + +def retrieve_tpu_embedding_momentum_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve Momentum embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. 
+ + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, momenta). + + parameters: A `Tensor` of type `float32`. + momenta: A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RetrieveTPUEmbeddingMomentumParameters", name, "table_id", + table_id, "table_name", table_name, "num_shards", num_shards, + "shard_id", shard_id, "config", config) + _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return retrieve_tpu_embedding_momentum_parameters_eager_fallback( + table_id=table_id, table_name=table_name, num_shards=num_shards, + shard_id=shard_id, config=config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RetrieveTPUEmbeddingMomentumParameters", num_shards=num_shards, + shard_id=shard_id, + table_id=table_id, + table_name=table_name, + config=config, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", + _op.get_attr("table_name"), "num_shards", + _op._get_attr_int("num_shards"), "shard_id", + _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RetrieveTPUEmbeddingMomentumParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) + return _result + +RetrieveTPUEmbeddingMomentumParameters = tf_export("raw_ops.RetrieveTPUEmbeddingMomentumParameters")(_ops.to_raw_op(retrieve_tpu_embedding_momentum_parameters)) + + +def retrieve_tpu_embedding_momentum_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx): + num_shards = _execute.make_int(num_shards, "num_shards") + shard_id = _execute.make_int(shard_id, "shard_id") + if table_id is None: + table_id = -1 + table_id = _execute.make_int(table_id, "table_id") + if table_name is None: + table_name = "" + table_name = _execute.make_str(table_name, "table_name") + if config is None: + config = "" + config = _execute.make_str(config, "config") + _inputs_flat = [] + _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", + num_shards, "shard_id", shard_id, "config", config) + _result = 
_execute.execute(b"RetrieveTPUEmbeddingMomentumParameters", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RetrieveTPUEmbeddingMomentumParameters", _inputs_flat, _attrs, _result) + _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) + return _result + +_RetrieveTPUEmbeddingProximalAdagradParametersOutput = collections.namedtuple( + "RetrieveTPUEmbeddingProximalAdagradParameters", + ["parameters", "accumulators"]) + + +def retrieve_tpu_embedding_proximal_adagrad_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None): + r"""Retrieve proximal Adagrad embedding parameters. + + An op that retrieves optimization parameters from embedding to host + memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + the correct embedding table configuration. For example, this op is + used to retrieve updated parameters before saving a checkpoint. + + Args: + num_shards: An `int`. + shard_id: An `int`. + table_id: An optional `int`. Defaults to `-1`. + table_name: An optional `string`. Defaults to `""`. + config: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (parameters, accumulators). + + parameters: A `Tensor` of type `float32`. + accumulators: A `Tensor` of type `float32`. 
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C layer when executing eagerly.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RetrieveTPUEmbeddingProximalAdagradParameters", name,
        "table_id", table_id, "table_name", table_name, "num_shards",
        num_shards, "shard_id", shard_id, "config", config)
      _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Slow eager path, taken when the fast path raised _FallbackException.
      return retrieve_tpu_embedding_proximal_adagrad_parameters_eager_fallback(
          table_id=table_id, table_name=table_name, num_shards=num_shards,
          shard_id=shard_id, config=config, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RetrieveTPUEmbeddingProximalAdagradParameters", num_shards=num_shards,
                                                         shard_id=shard_id,
                                                         table_id=table_id,
                                                         table_name=table_name,
                                                         config=config,
                                                         name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
              _op.get_attr("table_name"), "num_shards",
              _op._get_attr_int("num_shards"), "shard_id",
              _op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RetrieveTPUEmbeddingProximalAdagradParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result)
  return _result

# Expose the undecorated wrapper under tf.raw_ops.
RetrieveTPUEmbeddingProximalAdagradParameters = tf_export("raw_ops.RetrieveTPUEmbeddingProximalAdagradParameters")(_ops.to_raw_op(retrieve_tpu_embedding_proximal_adagrad_parameters))


def retrieve_tpu_embedding_proximal_adagrad_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx):
  # Eager fallback: normalize attrs, then execute the op by name (2 outputs).
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _inputs_flat = []
  _attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
  num_shards, "shard_id", shard_id, "config", config)
  _result = _execute.execute(b"RetrieveTPUEmbeddingProximalAdagradParameters",
                             2, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RetrieveTPUEmbeddingProximalAdagradParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result)
  return _result

# Named 3-tuple returned by the ProximalYogi retrieve op below.
_RetrieveTPUEmbeddingProximalYogiParametersOutput = collections.namedtuple(
    "RetrieveTPUEmbeddingProximalYogiParameters",
    ["parameters", "v", "m"])


def retrieve_tpu_embedding_proximal_yogi_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None):
  r"""Retrieve proximal Yogi embedding parameters.

  Args:
    num_shards: An `int`.
    shard_id: An `int`.
    table_id: An optional `int`. Defaults to `-1`.
    table_name: An optional `string`. Defaults to `""`.
    config: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (parameters, v, m).

    parameters: A `Tensor` of type `float32`.
    v: A `Tensor` of type `float32`.
    m: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RetrieveTPUEmbeddingProximalYogiParameters", name, "table_id",
        table_id, "table_name", table_name, "num_shards", num_shards,
        "shard_id", shard_id, "config", config)
      _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return retrieve_tpu_embedding_proximal_yogi_parameters_eager_fallback(
          table_id=table_id, table_name=table_name, num_shards=num_shards,
          shard_id=shard_id, config=config, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RetrieveTPUEmbeddingProximalYogiParameters", num_shards=num_shards,
                                                      shard_id=shard_id,
                                                      table_id=table_id,
                                                      table_name=table_name,
                                                      config=config,
                                                      name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
              _op.get_attr("table_name"), "num_shards",
              _op._get_attr_int("num_shards"), "shard_id",
              _op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RetrieveTPUEmbeddingProximalYogiParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result)
  return _result

RetrieveTPUEmbeddingProximalYogiParameters = tf_export("raw_ops.RetrieveTPUEmbeddingProximalYogiParameters")(_ops.to_raw_op(retrieve_tpu_embedding_proximal_yogi_parameters))


def retrieve_tpu_embedding_proximal_yogi_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx):
  # Eager fallback: normalize attrs, then execute the op by name (3 outputs).
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _inputs_flat = []
  _attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
  num_shards, "shard_id", shard_id, "config", config)
  _result = _execute.execute(b"RetrieveTPUEmbeddingProximalYogiParameters", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RetrieveTPUEmbeddingProximalYogiParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result)
  return _result

# Named 3-tuple returned by the RMSProp retrieve op below.
_RetrieveTPUEmbeddingRMSPropParametersOutput = collections.namedtuple(
    "RetrieveTPUEmbeddingRMSPropParameters",
    ["parameters", "ms", "mom"])


def retrieve_tpu_embedding_rms_prop_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None):
  r"""Retrieve RMSProp embedding parameters.

  An op that retrieves optimization parameters from embedding to host
  memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
  the correct embedding table configuration. For example, this op is
  used to retrieve updated parameters before saving a checkpoint.

  Args:
    num_shards: An `int`.
    shard_id: An `int`.
    table_id: An optional `int`. Defaults to `-1`.
    table_name: An optional `string`. Defaults to `""`.
    config: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (parameters, ms, mom).

    parameters: A `Tensor` of type `float32`.
    ms: A `Tensor` of type `float32`.
    mom: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RetrieveTPUEmbeddingRMSPropParameters", name, "table_id",
        table_id, "table_name", table_name, "num_shards", num_shards,
        "shard_id", shard_id, "config", config)
      _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return retrieve_tpu_embedding_rms_prop_parameters_eager_fallback(
          table_id=table_id, table_name=table_name, num_shards=num_shards,
          shard_id=shard_id, config=config, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RetrieveTPUEmbeddingRMSPropParameters", num_shards=num_shards,
                                                 shard_id=shard_id,
                                                 table_id=table_id,
                                                 table_name=table_name,
                                                 config=config, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
              _op.get_attr("table_name"), "num_shards",
              _op._get_attr_int("num_shards"), "shard_id",
              _op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RetrieveTPUEmbeddingRMSPropParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result)
  return _result

# Expose the undecorated wrapper under tf.raw_ops.
RetrieveTPUEmbeddingRMSPropParameters = tf_export("raw_ops.RetrieveTPUEmbeddingRMSPropParameters")(_ops.to_raw_op(retrieve_tpu_embedding_rms_prop_parameters))


def retrieve_tpu_embedding_rms_prop_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx):
  # Eager fallback: normalize attrs, then execute the op by name (3 outputs).
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _inputs_flat = []
  _attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
  num_shards, "shard_id", shard_id, "config", config)
  _result = _execute.execute(b"RetrieveTPUEmbeddingRMSPropParameters", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RetrieveTPUEmbeddingRMSPropParameters", _inputs_flat, _attrs, _result)
  _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result)
  return _result


def retrieve_tpu_embedding_stochastic_gradient_descent_parameters(num_shards: int, shard_id: int, table_id:int=-1, table_name:str="", config:str="", name=None) -> Annotated[Any, _atypes.Float32]:
  r"""Retrieve SGD embedding parameters.

  An op that retrieves optimization parameters from embedding to host
  memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
  the correct embedding table configuration. For example, this op is
  used to retrieve updated parameters before saving a checkpoint.

  Args:
    num_shards: An `int`.
    shard_id: An `int`.
    table_id: An optional `int`. Defaults to `-1`.
    table_name: An optional `string`. Defaults to `""`.
    config: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C layer when executing eagerly.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RetrieveTPUEmbeddingStochasticGradientDescentParameters", name,
        "table_id", table_id, "table_name", table_name, "num_shards",
        num_shards, "shard_id", shard_id, "config", config)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback(
          table_id=table_id, table_name=table_name, num_shards=num_shards,
          shard_id=shard_id, config=config, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RetrieveTPUEmbeddingStochasticGradientDescentParameters", num_shards=num_shards,
                                                                   shard_id=shard_id,
                                                                   table_id=table_id,
                                                                   table_name=table_name,
                                                                   config=config,
                                                                   name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name",
              _op.get_attr("table_name"), "num_shards",
              _op._get_attr_int("num_shards"), "shard_id",
              _op._get_attr_int("shard_id"), "config", _op.get_attr("config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RetrieveTPUEmbeddingStochasticGradientDescentParameters", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

RetrieveTPUEmbeddingStochasticGradientDescentParameters = tf_export("raw_ops.RetrieveTPUEmbeddingStochasticGradientDescentParameters")(_ops.to_raw_op(retrieve_tpu_embedding_stochastic_gradient_descent_parameters))


def retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback(num_shards: int, shard_id: int, table_id: int, table_name: str, config: str, name, ctx) -> Annotated[Any, _atypes.Float32]:
  # Eager fallback: normalize attrs, then execute the op by name (1 output).
  num_shards = _execute.make_int(num_shards, "num_shards")
  shard_id = _execute.make_int(shard_id, "shard_id")
  if table_id is None:
    table_id = -1
  table_id = _execute.make_int(table_id, "table_id")
  if table_name is None:
    table_name = ""
  table_name = _execute.make_str(table_name, "table_name")
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  _inputs_flat = []
  _attrs = ("table_id", table_id, "table_name", table_name, "num_shards",
  num_shards, "shard_id", shard_id, "config", config)
  _result = _execute.execute(b"RetrieveTPUEmbeddingStochasticGradientDescentParameters",
                             1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RetrieveTPUEmbeddingStochasticGradientDescentParameters", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def send_tpu_embedding_gradients(inputs: Annotated[List[Any], _atypes.Float32], learning_rates: Annotated[List[Any], _atypes.Float32], config: str, name=None):
  r"""Performs gradient updates of embedding tables.

  Args:
    inputs: A list of at least 1 `Tensor` objects with type `float32`.
      A TensorList of gradients with which to update embedding tables.
      This argument has the same length and shapes as the return value of
      RecvTPUEmbeddingActivations, but contains gradients of the model's loss
      with respect to the embedding activations. The embedding tables are updated
      from these gradients via the optimizer specified in the TPU embedding
      configuration given to tpu.initialize_system.
    learning_rates: A list of `Tensor` objects with type `float32`.
      A TensorList of float32 scalars, one for each dynamic learning
      rate tag: see the comments in
      //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
      Multiple tables can share the same dynamic learning rate tag as specified
      in the configuration. If the learning rates for all tables are constant,
      this list should be empty.
    config: A `string`. Serialized TPUEmbeddingConfiguration proto.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SendTPUEmbeddingGradients", name, inputs, learning_rates,
        "config", config)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return send_tpu_embedding_gradients_eager_fallback(
          inputs, learning_rates, config=config, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'send_tpu_embedding_gradients' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if not isinstance(learning_rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'learning_rates' argument to "
        "'send_tpu_embedding_gradients' Op, not %r." % learning_rates)
  _attr_NN = len(learning_rates)
  config = _execute.make_str(config, "config")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SendTPUEmbeddingGradients", inputs=inputs,
                                     learning_rates=learning_rates,
                                     config=config, name=name)
  # Stateful op with no outputs: return the Operation itself.
  return _op
SendTPUEmbeddingGradients = tf_export("raw_ops.SendTPUEmbeddingGradients")(_ops.to_raw_op(send_tpu_embedding_gradients))


def send_tpu_embedding_gradients_eager_fallback(inputs: Annotated[List[Any], _atypes.Float32], learning_rates: Annotated[List[Any], _atypes.Float32], config: str, name, ctx):
  # Eager fallback: validate list inputs, convert to float32 tensors, and
  # execute the op by name (0 outputs).
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'send_tpu_embedding_gradients' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if not isinstance(learning_rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'learning_rates' argument to "
        "'send_tpu_embedding_gradients' Op, not %r." % learning_rates)
  _attr_NN = len(learning_rates)
  config = _execute.make_str(config, "config")
  inputs = _ops.convert_n_to_tensor(inputs, _dtypes.float32)
  learning_rates = _ops.convert_n_to_tensor(learning_rates, _dtypes.float32)
  _inputs_flat = list(inputs) + list(learning_rates)
  _attrs = ("N", _attr_N, "NN", _attr_NN, "config", config)
  _result = _execute.execute(b"SendTPUEmbeddingGradients", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  _result = None
  return _result


def shutdown_distributed_tpu(name=None):
  r"""Shuts down a running distributed TPU system.

  The op returns an error if no system is running.

  Args:
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C layer when executing eagerly.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ShutdownDistributedTPU", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return shutdown_distributed_tpu_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ShutdownDistributedTPU", name=name)
  # Stateful op with no outputs: return the Operation itself.
  return _op
ShutdownDistributedTPU = tf_export("raw_ops.ShutdownDistributedTPU")(_ops.to_raw_op(shutdown_distributed_tpu))


def shutdown_distributed_tpu_eager_fallback(name, ctx):
  # Eager fallback: execute the op by name (0 inputs, 0 outputs).
  _inputs_flat = []
  _attrs = None
  _result = _execute.execute(b"ShutdownDistributedTPU", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  _result = None
  return _result


def tpu_compilation_result(name=None) -> Annotated[Any, _atypes.String]:
  r"""Returns the result of a TPU compilation.

  This operation returns the result of a TPU compilation as a serialized
  CompilationResultProto, which holds a status and an error message if an error
  occurred during compilation.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUCompilationResult", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_compilation_result_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUCompilationResult", name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUCompilationResult", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

TPUCompilationResult = tf_export("raw_ops.TPUCompilationResult")(_ops.to_raw_op(tpu_compilation_result))


def tpu_compilation_result_eager_fallback(name, ctx) -> Annotated[Any, _atypes.String]:
  # Eager fallback: execute the op by name (0 inputs, 1 output).
  _inputs_flat = []
  _attrs = None
  _result = _execute.execute(b"TPUCompilationResult", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUCompilationResult", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def tpu_embedding_activations(embedding_variable: Annotated[Any, _atypes.Float32], sliced_activations: Annotated[Any, _atypes.Float32], table_id: int, lookup_id: int, name=None) -> Annotated[Any, _atypes.Float32]:
  r"""An op enabling differentiation of TPU Embeddings.

  This op simply returns its first input, which is assumed to have been sliced
  from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
  this op, and its first argument being a trainable Variable, enables automatic
  differentiation of graphs containing embeddings via the TPU Embedding Python
  libraries.

  Args:
    embedding_variable: A `Tensor` of type `float32`.
      A trainable variable, enabling optimizers to find this op.
    sliced_activations: A `Tensor` of type `float32`.
      The embedding activations Tensor to return.
    table_id: An `int` that is `>= 0`.
      The id of the table in the embedding layer configuration from which
      these activations were computed.
    lookup_id: An `int` that is `>= 0`.
      Identifier of the set of embedding indices which produced these
      activations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUEmbeddingActivations", name, embedding_variable,
        sliced_activations, "table_id", table_id, "lookup_id", lookup_id)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_embedding_activations_eager_fallback(
          embedding_variable, sliced_activations, table_id=table_id,
          lookup_id=lookup_id, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  table_id = _execute.make_int(table_id, "table_id")
  lookup_id = _execute.make_int(lookup_id, "lookup_id")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUEmbeddingActivations", embedding_variable=embedding_variable,
                                   sliced_activations=sliced_activations,
                                   table_id=table_id, lookup_id=lookup_id,
                                   name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("table_id", _op._get_attr_int("table_id"), "lookup_id",
              _op._get_attr_int("lookup_id"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUEmbeddingActivations", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

TPUEmbeddingActivations = tf_export("raw_ops.TPUEmbeddingActivations")(_ops.to_raw_op(tpu_embedding_activations))


def tpu_embedding_activations_eager_fallback(embedding_variable: Annotated[Any, _atypes.Float32], sliced_activations: Annotated[Any, _atypes.Float32], table_id: int, lookup_id: int, name, ctx) -> Annotated[Any, _atypes.Float32]:
  # Eager fallback: coerce inputs to float32 tensors, then execute the op by
  # name (1 output).
  table_id = _execute.make_int(table_id, "table_id")
  lookup_id = _execute.make_int(lookup_id, "lookup_id")
  embedding_variable = _ops.convert_to_tensor(embedding_variable, _dtypes.float32)
  sliced_activations = _ops.convert_to_tensor(sliced_activations, _dtypes.float32)
  _inputs_flat = [embedding_variable, sliced_activations]
  _attrs = ("table_id", table_id, "lookup_id", lookup_id)
  _result = _execute.execute(b"TPUEmbeddingActivations", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUEmbeddingActivations", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def tpu_ordinal_selector(name=None) -> Annotated[Any, _atypes.Int32]:
  r"""A TPU core selector Op.

  This Op produces a set of TPU cores (for warm-up) or a single TPU core
  (for regular inference) to execute the TPU program on. The output is
  consumed by TPUPartitionedCall.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUOrdinalSelector", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_ordinal_selector_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUOrdinalSelector", name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUOrdinalSelector", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

TPUOrdinalSelector = tf_export("raw_ops.TPUOrdinalSelector")(_ops.to_raw_op(tpu_ordinal_selector))


def tpu_ordinal_selector_eager_fallback(name, ctx) -> Annotated[Any, _atypes.Int32]:
  # Eager fallback: execute the op by name (0 inputs, 1 output).
  _inputs_flat = []
  _attrs = None
  _result = _execute.execute(b"TPUOrdinalSelector", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUOrdinalSelector", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def tpu_partitioned_call(args, device_ordinal: Annotated[Any, _atypes.Int32], Tout, f, autotuner_thresh:int=0, name=None):
  r"""Calls a function placed on a specified TPU device.

  Args:
    args: A list of `Tensor` objects. The arguments to the function.
    device_ordinal: A `Tensor` of type `int32`.
      The TPU device ordinal to run the function on.
    Tout: A list of `tf.DTypes`. The types of the outputs of the function.
    f: A function decorated with @Defun. The function to call.
    autotuner_thresh: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C layer when executing eagerly.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUPartitionedCall", name, args, device_ordinal, "Tout", Tout,
        "f", f, "autotuner_thresh", autotuner_thresh)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_partitioned_call_eager_fallback(
          args, device_ordinal, Tout=Tout, f=f,
          autotuner_thresh=autotuner_thresh, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'tpu_partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if autotuner_thresh is None:
    autotuner_thresh = 0
  autotuner_thresh = _execute.make_int(autotuner_thresh, "autotuner_thresh")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedCall", args=args, device_ordinal=device_ordinal,
                              Tout=Tout, f=f,
                              autotuner_thresh=autotuner_thresh, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f",
              _op.get_attr("f"), "autotuner_thresh",
              _op._get_attr_int("autotuner_thresh"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedCall", _inputs_flat, _attrs, _result)
  # Variable number of outputs (len(Tout)): return the full list.
  return _result

TPUPartitionedCall = tf_export("raw_ops.TPUPartitionedCall")(_ops.to_raw_op(tpu_partitioned_call))


def tpu_partitioned_call_eager_fallback(args, device_ordinal: Annotated[Any, _atypes.Int32], Tout, f, autotuner_thresh: int, name, ctx):
  # Eager fallback: infer Tin from the mixed args, then execute the op by name
  # with len(Tout) outputs.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'tpu_partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if autotuner_thresh is None:
    autotuner_thresh = 0
  autotuner_thresh = _execute.make_int(autotuner_thresh, "autotuner_thresh")
  _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx)
  device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32)
  _inputs_flat = list(args) + [device_ordinal]
  _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f, "autotuner_thresh",
  autotuner_thresh)
  _result = _execute.execute(b"TPUPartitionedCall", len(Tout),
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUPartitionedCall", _inputs_flat, _attrs, _result)
  return _result


# NOTE: the mutable list defaults below are safe in this generated code — each
# is replaced by a fresh list (comprehension) before use and never mutated.
def tpu_replicate_metadata(num_replicas: int, num_cores_per_replica:int=1, topology:str="", use_tpu:bool=True, device_assignment=[], computation_shape=[], host_compute_core=[], padding_map=[], step_marker_location:str="STEP_MARK_AT_ENTRY", allow_soft_placement:bool=False, use_spmd_for_xla_partitioning:bool=False, tpu_compile_options_proto:str="", name=None):
  r"""Metadata indicating how the TPU computation should be replicated.

  This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.

  Args:
    num_replicas: An `int` that is `>= 0`.
      Number of replicas of the computation
    num_cores_per_replica: An optional `int`. Defaults to `1`.
      Number of cores per replica. Used for model parallelism.
    topology: An optional `string`. Defaults to `""`.
      TopologyProto indicating the topology of the TPU pod slice.
    use_tpu: An optional `bool`. Defaults to `True`.
      Whether to place the computation on the TPU.
    device_assignment: An optional list of `ints`. Defaults to `[]`.
      The assignment of devices for the computation.
    computation_shape: An optional list of `ints`. Defaults to `[]`.
      DEPRECATED. Use num_cores_per_replica instead.
    host_compute_core: An optional list of `strings`. Defaults to `[]`.
    padding_map: An optional list of `strings`. Defaults to `[]`.
    step_marker_location: An optional `string`. Defaults to `"STEP_MARK_AT_ENTRY"`.
    allow_soft_placement: An optional `bool`. Defaults to `False`.
    use_spmd_for_xla_partitioning: An optional `bool`. Defaults to `False`.
    tpu_compile_options_proto: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUReplicateMetadata", name, "num_replicas", num_replicas,
        "num_cores_per_replica", num_cores_per_replica, "topology", topology,
        "use_tpu", use_tpu, "device_assignment", device_assignment,
        "computation_shape", computation_shape, "host_compute_core",
        host_compute_core, "padding_map", padding_map, "step_marker_location",
        step_marker_location, "allow_soft_placement", allow_soft_placement,
        "use_spmd_for_xla_partitioning", use_spmd_for_xla_partitioning,
        "tpu_compile_options_proto", tpu_compile_options_proto)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_replicate_metadata_eager_fallback(
          num_replicas=num_replicas,
          num_cores_per_replica=num_cores_per_replica, topology=topology,
          use_tpu=use_tpu, device_assignment=device_assignment,
          computation_shape=computation_shape,
          host_compute_core=host_compute_core, padding_map=padding_map,
          step_marker_location=step_marker_location,
          allow_soft_placement=allow_soft_placement,
          use_spmd_for_xla_partitioning=use_spmd_for_xla_partitioning,
          tpu_compile_options_proto=tpu_compile_options_proto, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  if num_cores_per_replica is None:
    num_cores_per_replica = 1
  num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica")
  if topology is None:
    topology = ""
  topology = _execute.make_str(topology, "topology")
  if use_tpu is None:
    use_tpu = True
  use_tpu = _execute.make_bool(use_tpu, "use_tpu")
  if device_assignment is None:
    device_assignment = []
  if not isinstance(device_assignment, (list, tuple)):
    raise TypeError(
        "Expected list for 'device_assignment' argument to "
        "'tpu_replicate_metadata' Op, not %r." % device_assignment)
  device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment]
  if computation_shape is None:
    computation_shape = []
  if not isinstance(computation_shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'computation_shape' argument to "
        "'tpu_replicate_metadata' Op, not %r." % computation_shape)
  computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape]
  if host_compute_core is None:
    host_compute_core = []
  if not isinstance(host_compute_core, (list, tuple)):
    raise TypeError(
        "Expected list for 'host_compute_core' argument to "
        "'tpu_replicate_metadata' Op, not %r." % host_compute_core)
  host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core]
  if padding_map is None:
    padding_map = []
  if not isinstance(padding_map, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_map' argument to "
        "'tpu_replicate_metadata' Op, not %r." % padding_map)
  padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map]
  if step_marker_location is None:
    step_marker_location = "STEP_MARK_AT_ENTRY"
  step_marker_location = _execute.make_str(step_marker_location, "step_marker_location")
  if allow_soft_placement is None:
    allow_soft_placement = False
  allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement")
  if use_spmd_for_xla_partitioning is None:
    use_spmd_for_xla_partitioning = False
  use_spmd_for_xla_partitioning = _execute.make_bool(use_spmd_for_xla_partitioning, "use_spmd_for_xla_partitioning")
  if tpu_compile_options_proto is None:
    tpu_compile_options_proto = ""
  tpu_compile_options_proto = _execute.make_str(tpu_compile_options_proto, "tpu_compile_options_proto")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUReplicateMetadata", num_replicas=num_replicas,
                                num_cores_per_replica=num_cores_per_replica,
                                topology=topology, use_tpu=use_tpu,
                                device_assignment=device_assignment,
                                computation_shape=computation_shape,
                                host_compute_core=host_compute_core,
                                padding_map=padding_map,
                                step_marker_location=step_marker_location,
                                allow_soft_placement=allow_soft_placement,
                                use_spmd_for_xla_partitioning=use_spmd_for_xla_partitioning,
                                tpu_compile_options_proto=tpu_compile_options_proto,
                                name=name)
  # Stateful op with no outputs: return the Operation itself.
  return _op
TPUReplicateMetadata = tf_export("raw_ops.TPUReplicateMetadata")(_ops.to_raw_op(tpu_replicate_metadata))


def tpu_replicate_metadata_eager_fallback(num_replicas: int, num_cores_per_replica: int, topology: str, use_tpu: bool, device_assignment, computation_shape, host_compute_core, padding_map, step_marker_location: str, allow_soft_placement: bool, use_spmd_for_xla_partitioning: bool, tpu_compile_options_proto: str, name, ctx):
  # Eager fallback: normalize every attr exactly as the graph path does, then
  # execute the op by name (0 inputs, 0 outputs).
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  if num_cores_per_replica is None:
    num_cores_per_replica = 1
  num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica")
  if topology is None:
    topology = ""
  topology = _execute.make_str(topology, "topology")
  if use_tpu is None:
    use_tpu = True
  use_tpu = _execute.make_bool(use_tpu, "use_tpu")
  if device_assignment is None:
    device_assignment = []
  if not isinstance(device_assignment, (list, tuple)):
    raise TypeError(
        "Expected list for 'device_assignment' argument to "
        "'tpu_replicate_metadata' Op, not %r." % device_assignment)
  device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment]
  if computation_shape is None:
    computation_shape = []
  if not isinstance(computation_shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'computation_shape' argument to "
        "'tpu_replicate_metadata' Op, not %r." % computation_shape)
  computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape]
  if host_compute_core is None:
    host_compute_core = []
  if not isinstance(host_compute_core, (list, tuple)):
    raise TypeError(
        "Expected list for 'host_compute_core' argument to "
        "'tpu_replicate_metadata' Op, not %r." % host_compute_core)
  host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core]
  if padding_map is None:
    padding_map = []
  if not isinstance(padding_map, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_map' argument to "
        "'tpu_replicate_metadata' Op, not %r." % padding_map)
  padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map]
  if step_marker_location is None:
    step_marker_location = "STEP_MARK_AT_ENTRY"
  step_marker_location = _execute.make_str(step_marker_location, "step_marker_location")
  if allow_soft_placement is None:
    allow_soft_placement = False
  allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement")
  if use_spmd_for_xla_partitioning is None:
    use_spmd_for_xla_partitioning = False
  use_spmd_for_xla_partitioning = _execute.make_bool(use_spmd_for_xla_partitioning, "use_spmd_for_xla_partitioning")
  if tpu_compile_options_proto is None:
    tpu_compile_options_proto = ""
  tpu_compile_options_proto = _execute.make_str(tpu_compile_options_proto, "tpu_compile_options_proto")
  _inputs_flat = []
  _attrs = ("num_replicas", num_replicas, "num_cores_per_replica",
  num_cores_per_replica, "topology", topology, "use_tpu", use_tpu,
  "device_assignment", device_assignment, "computation_shape",
  computation_shape, "host_compute_core", host_compute_core, "padding_map",
  padding_map, "step_marker_location", step_marker_location,
  "allow_soft_placement", allow_soft_placement,
  "use_spmd_for_xla_partitioning", use_spmd_for_xla_partitioning,
  "tpu_compile_options_proto", tpu_compile_options_proto)
  _result = _execute.execute(b"TPUReplicateMetadata", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None
  return _result


# Type variable enumerating the dtypes accepted for T — presumably used by the
# TPUReplicatedInput wrapper that follows this chunk (not visible here).
TV_TPUReplicatedInput_T = TypeVar("TV_TPUReplicatedInput_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant)

+def tpu_replicated_input(inputs: Annotated[List[Any], TV_TPUReplicatedInput_T], is_mirrored_variable:bool=False, index:int=-1, is_packed:bool=False, name=None) -> Annotated[Any, TV_TPUReplicatedInput_T]: + r"""Connects N inputs to an N-way replicated TPU computation. + + This operation holds a replicated input to a `tpu.replicate()` computation subgraph. + Each replicated input has the same shape and type alongside the output. + + For example: + ``` + %a = "tf.opA"() + %b = "tf.opB"() + %replicated_input = "tf.TPUReplicatedInput"(%a, %b) + %computation = "tf.Computation"(%replicated_input) + ``` + The above computation has a replicated input of two replicas. + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type. + is_mirrored_variable: An optional `bool`. Defaults to `False`. + index: An optional `int`. Defaults to `-1`. + is_packed: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUReplicatedInput", name, inputs, "is_mirrored_variable", + is_mirrored_variable, "index", index, "is_packed", is_packed) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_replicated_input_eager_fallback( + inputs, is_mirrored_variable=is_mirrored_variable, index=index, + is_packed=is_packed, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_replicated_input' Op, not %r." 
% inputs) + _attr_N = len(inputs) + if is_mirrored_variable is None: + is_mirrored_variable = False + is_mirrored_variable = _execute.make_bool(is_mirrored_variable, "is_mirrored_variable") + if index is None: + index = -1 + index = _execute.make_int(index, "index") + if is_packed is None: + is_packed = False + is_packed = _execute.make_bool(is_packed, "is_packed") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUReplicatedInput", inputs=inputs, + is_mirrored_variable=is_mirrored_variable, + index=index, is_packed=is_packed, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), + "is_mirrored_variable", + _op._get_attr_bool("is_mirrored_variable"), "index", + _op._get_attr_int("index"), "is_packed", + _op._get_attr_bool("is_packed")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUReplicatedInput", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +TPUReplicatedInput = tf_export("raw_ops.TPUReplicatedInput")(_ops.to_raw_op(tpu_replicated_input)) + + +def tpu_replicated_input_eager_fallback(inputs: Annotated[List[Any], TV_TPUReplicatedInput_T], is_mirrored_variable: bool, index: int, is_packed: bool, name, ctx) -> Annotated[Any, TV_TPUReplicatedInput_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'tpu_replicated_input' Op, not %r." 
% inputs) + _attr_N = len(inputs) + if is_mirrored_variable is None: + is_mirrored_variable = False + is_mirrored_variable = _execute.make_bool(is_mirrored_variable, "is_mirrored_variable") + if index is None: + index = -1 + index = _execute.make_int(index, "index") + if is_packed is None: + is_packed = False + is_packed = _execute.make_bool(is_packed, "is_packed") + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) + _inputs_flat = list(inputs) + _attrs = ("N", _attr_N, "T", _attr_T, "is_mirrored_variable", + is_mirrored_variable, "index", index, "is_packed", is_packed) + _result = _execute.execute(b"TPUReplicatedInput", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUReplicatedInput", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_TPUReplicatedOutput_T = TypeVar("TV_TPUReplicatedOutput_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def tpu_replicated_output(input: Annotated[Any, TV_TPUReplicatedOutput_T], num_replicas: int, name=None): + r"""Connects N outputs from an N-way replicated TPU computation. + + This operation holds a replicated output from a `tpu.replicate()` computation subgraph. + Each replicated output has the same shape and type alongside the input. + + For example: + ``` + %computation = "tf.Computation"() + %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) + ``` + The above computation has a replicated output of two replicas. + + Args: + input: A `Tensor`. 
+ num_replicas: An `int` that is `>= 1`. + name: A name for the operation (optional). + + Returns: + A list of `num_replicas` `Tensor` objects with the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "TPUReplicatedOutput", name, input, "num_replicas", + num_replicas) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return tpu_replicated_output_eager_fallback( + input, num_replicas=num_replicas, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + num_replicas = _execute.make_int(num_replicas, "num_replicas") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "TPUReplicatedOutput", input=input, num_replicas=num_replicas, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_replicas", _op._get_attr_int("num_replicas"), "T", + _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "TPUReplicatedOutput", _inputs_flat, _attrs, _result) + return _result + +TPUReplicatedOutput = tf_export("raw_ops.TPUReplicatedOutput")(_ops.to_raw_op(tpu_replicated_output)) + + +def tpu_replicated_output_eager_fallback(input: Annotated[Any, TV_TPUReplicatedOutput_T], num_replicas: int, name, ctx): + num_replicas = _execute.make_int(num_replicas, "num_replicas") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("num_replicas", num_replicas, "T", _attr_T) + _result = _execute.execute(b"TPUReplicatedOutput", num_replicas, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "TPUReplicatedOutput", _inputs_flat, _attrs, _result) + return _result + + +def 
worker_heartbeat(request: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.String]: + r"""Worker heartbeat op. + + Heartbeats may be sent periodically to indicate the coordinator is still active, + to retrieve the current worker status and to expedite shutdown when necessary. + + Args: + request: A `Tensor` of type `string`. + A string tensor containing a serialized WorkerHeartbeatRequest + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "WorkerHeartbeat", name, request) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return worker_heartbeat_eager_fallback( + request, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "WorkerHeartbeat", request=request, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "WorkerHeartbeat", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +WorkerHeartbeat = tf_export("raw_ops.WorkerHeartbeat")(_ops.to_raw_op(worker_heartbeat)) + + +def worker_heartbeat_eager_fallback(request: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.String]: + request = _ops.convert_to_tensor(request, _dtypes.string) + _inputs_flat = [request] + _attrs = None + _result = _execute.execute(b"WorkerHeartbeat", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "WorkerHeartbeat", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaConcatND_T = TypeVar("TV_XlaConcatND_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def xla_concat_nd(inputs: Annotated[List[Any], TV_XlaConcatND_T], num_concats, paddings=[], name=None) -> Annotated[Any, TV_XlaConcatND_T]: + r"""Concats input tensor across all dimensions. + + An op which merges slices the input tensor based on the given num_splits + attribute, strips paddings optionally, and returns the merged tensor without + paddings. + + This op may be generated via the TPU bridge. 
+ + For example, with `input` tensor: + ``` + [[0, 1], + [4, 5]] + [[2, 3], + [6, 7]] + [[8, 9], + [12, 13]] + [[10, 11], + [14, 15]] + ``` + `num_splits`: + ``` + [2, 2] + ``` + and `paddings`: + ``` + [1, 1] + ``` + the expected `outputs` is: + ``` + [[0, 1, 2], + [4, 5, 6], + [8, 9, 10]] + ``` + + Args: + inputs: A list of at least 1 `Tensor` objects with the same type. + Input tensor slices in row-major order to merge across all dimensions. All + inputs must have the same shape. + } + out_arg { + name: "output" + description: < Annotated[Any, TV_XlaConcatND_T]: + if not isinstance(inputs, (list, tuple)): + raise TypeError( + "Expected list for 'inputs' argument to " + "'xla_concat_nd' Op, not %r." % inputs) + _attr_N = len(inputs) + if not isinstance(num_concats, (list, tuple)): + raise TypeError( + "Expected list for 'num_concats' argument to " + "'xla_concat_nd' Op, not %r." % num_concats) + num_concats = [_execute.make_int(_i, "num_concats") for _i in num_concats] + if paddings is None: + paddings = [] + if not isinstance(paddings, (list, tuple)): + raise TypeError( + "Expected list for 'paddings' argument to " + "'xla_concat_nd' Op, not %r." 
% paddings) + paddings = [_execute.make_int(_i, "paddings") for _i in paddings] + _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) + _inputs_flat = list(inputs) + _attrs = ("T", _attr_T, "N", _attr_N, "num_concats", num_concats, + "paddings", paddings) + _result = _execute.execute(b"XlaConcatND", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaConcatND", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSplitND_T = TypeVar("TV_XlaSplitND_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def xla_split_nd(input: Annotated[Any, TV_XlaSplitND_T], N: int, num_splits, paddings=[], name=None): + r"""Splits input tensor across all dimensions. + + An op which slices the input tensor based on the given num_splits attribute, + pads slices optionally, and returned the slices. Slices are returned in + row-major order. + + This op may be generated via the TPU bridge. + + For example, with `input` tensor: + ``` + [[0, 1, 2], + [3, 4, 5], + [6, 7, 8]] + ``` + `num_splits`: + ``` + [2, 2] + ``` + and `paddings`: + ``` + [1, 1] + ``` + the expected `outputs` is: + ``` + [[0, 1], + [3, 4]] + [[2, 0], + [5, 0]] + [[6, 7], + [0, 0]] + [[8, 0], + [0, 0]] + ``` + + Args: + input: A `Tensor`. Input tensor to split across all dimensions. + } + out_arg { + name: "outputs" + description: <= 1`. + num_splits: A list of `ints`. + Number of ways to split per dimension. Shape dimensions must be evenly + divisible. 
+ paddings: An optional list of `ints`. Defaults to `[]`. + Optional list of right paddings per dimension of input tensor to apply before + splitting. This can be used to make a dimension evenly divisible. + name: A name for the operation (optional). + + Returns: + A list of `N` `Tensor` objects with the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSplitND", name, input, "N", N, "num_splits", num_splits, + "paddings", paddings) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return xla_split_nd_eager_fallback( + input, N=N, num_splits=num_splits, paddings=paddings, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + N = _execute.make_int(N, "N") + if not isinstance(num_splits, (list, tuple)): + raise TypeError( + "Expected list for 'num_splits' argument to " + "'xla_split_nd' Op, not %r." % num_splits) + num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] + if paddings is None: + paddings = [] + if not isinstance(paddings, (list, tuple)): + raise TypeError( + "Expected list for 'paddings' argument to " + "'xla_split_nd' Op, not %r." 
% paddings) + paddings = [_execute.make_int(_i, "paddings") for _i in paddings] + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSplitND", input=input, N=N, num_splits=num_splits, + paddings=paddings, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "N", _op._get_attr_int("N"), + "num_splits", _op.get_attr("num_splits"), "paddings", + _op.get_attr("paddings")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSplitND", _inputs_flat, _attrs, _result) + return _result + +XlaSplitND = tf_export("raw_ops.XlaSplitND")(_ops.to_raw_op(xla_split_nd)) + + +def xla_split_nd_eager_fallback(input: Annotated[Any, TV_XlaSplitND_T], N: int, num_splits, paddings, name, ctx): + N = _execute.make_int(N, "N") + if not isinstance(num_splits, (list, tuple)): + raise TypeError( + "Expected list for 'num_splits' argument to " + "'xla_split_nd' Op, not %r." % num_splits) + num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] + if paddings is None: + paddings = [] + if not isinstance(paddings, (list, tuple)): + raise TypeError( + "Expected list for 'paddings' argument to " + "'xla_split_nd' Op, not %r." 
% paddings) + paddings = [_execute.make_int(_i, "paddings") for _i in paddings] + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "N", N, "num_splits", num_splits, "paddings", + paddings) + _result = _execute.execute(b"XlaSplitND", N, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSplitND", _inputs_flat, _attrs, _result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_ops_impl.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_ops_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..45393ccba5349b79e3a185d3c463c7b297607e31 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg_ops_impl.py @@ -0,0 +1,83 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operations for linear algebra.""" + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.util import compat + +# Names below are lower_case. 
+# pylint: disable=invalid-name + + +def eye(num_rows, + num_columns=None, + batch_shape=None, + dtype=dtypes.float32, + name=None): + """Construct an identity matrix, or a batch of matrices. + + See `linalg_ops.eye`. + """ + with ops.name_scope( + name, default_name='eye', values=[num_rows, num_columns, batch_shape]): + is_square = num_columns is None + batch_shape = [] if batch_shape is None else batch_shape + num_columns = num_rows if num_columns is None else num_columns + + # We cannot statically infer what the diagonal size should be: + if (isinstance(num_rows, tensor.Tensor) or + isinstance(num_columns, tensor.Tensor)): + diag_size = math_ops.minimum(num_rows, num_columns) + else: + # We can statically infer the diagonal size, and whether it is square. + if not isinstance(num_rows, compat.integral_types) or not isinstance( + num_columns, compat.integral_types): + raise TypeError( + 'Arguments `num_rows` and `num_columns` must be positive integer ' + f'values. Received: num_rows={num_rows}, num_columns={num_columns}') + is_square = num_rows == num_columns + diag_size = np.minimum(num_rows, num_columns) + + # We can not statically infer the shape of the tensor. + if isinstance(batch_shape, tensor.Tensor) or isinstance( + diag_size, tensor.Tensor + ): + batch_shape = ops.convert_to_tensor( + batch_shape, name='shape', dtype=dtypes.int32 + ) + diag_shape = array_ops.concat((batch_shape, [diag_size]), axis=0) + if not is_square: + shape = array_ops.concat((batch_shape, [num_rows, num_columns]), axis=0) + # We can statically infer everything. 
+ else: + batch_shape = list(batch_shape) + diag_shape = batch_shape + [diag_size] + if not is_square: + shape = batch_shape + [num_rows, num_columns] + + diag_ones = array_ops.ones(diag_shape, dtype=dtype) + if is_square: + return array_ops.matrix_diag(diag_ones) + else: + zero_matrix = array_ops.zeros(shape, dtype=dtype) + return array_ops.matrix_set_diag(zero_matrix, diag_ones) + +# pylint: enable=invalid-name,redefined-builtin diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/logging_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/logging_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d14b41af05404b22eb6d871a738222eeb087f350 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/logging_ops.py @@ -0,0 +1,701 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Logging and Summary Operations. 
+ +API docstring: tensorflow.logging +""" +# pylint: disable=protected-access +import collections as py_collections +import os +import pprint +import random +import sys + +from absl import logging + +from tensorflow.python import pywrap_tfe +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import gen_logging_ops +from tensorflow.python.ops import string_ops +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_logging_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.platform import tf_logging +from tensorflow.python.util import dispatch +from tensorflow.python.util import nest +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.tf_export import tf_export + + +def enable_interactive_logging(): + pywrap_tfe.TFE_Py_EnableInteractivePythonLogging() + +# Register printing to the cell output if we are in a Colab or Jupyter Notebook. +try: + get_ipython() # Exists in an ipython env like Jupyter or Colab + enable_interactive_logging() +except NameError: + pass + +# The python wrapper for Assert is in control_flow_ops, as the Assert +# call relies on certain conditionals for its dependencies. Use +# control_flow_ops.Assert. + +# Assert and Print are special symbols in Python 2, so we must +# have an upper-case version of them. When support for it is dropped, +# we can allow lowercase. +# See https://github.com/tensorflow/tensorflow/issues/18053 + + +# pylint: disable=invalid-name +@deprecated("2018-08-20", "Use tf.print instead of tf.Print. Note that " + "tf.print returns a no-output operator that directly " + "prints the output. 
Outside of defuns or eager mode, " + "this operator will not be executed unless it is " + "directly specified in session.run or used as a " + "control dependency for other operators. This is " + "only a concern in graph mode. Below is an example " + "of how to ensure tf.print executes in graph mode:\n") +@tf_export(v1=["Print"]) +@dispatch.add_dispatch_support +def Print(input_, data, message=None, first_n=None, summarize=None, name=None): + """Prints a list of tensors. + + This is an identity op (behaves like `tf.identity`) with the side effect + of printing `data` when evaluating. + + Note: This op prints to the standard error. It is not currently compatible + with jupyter notebook (printing to the notebook *server's* output, not into + the notebook). + + @compatibility(TF2) + This API is deprecated. Use `tf.print` instead. `tf.print` does not need the + `input_` argument. + + `tf.print` works in TF2 when executing eagerly and inside a `tf.function`. + + In TF1-styled sessions, an explicit control dependency declaration is needed + to execute the `tf.print` operation. Refer to the documentation of + `tf.print` for more details. + @end_compatibility + + Args: + input_: A tensor passed through this op. + data: A list of tensors to print out when op is evaluated. + message: A string, prefix of the error message. + first_n: Only log `first_n` number of times. Negative numbers log always; + this is the default. + summarize: Only print this many entries of each tensor. If None, then a + maximum of 3 elements are printed per input tensor. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type and contents as `input_`. 
+ + ```python + sess = tf.compat.v1.Session() + with sess.as_default(): + tensor = tf.range(10) + print_op = tf.print(tensor) + with tf.control_dependencies([print_op]): + out = tf.add(tensor, tensor) + sess.run(out) + ``` + """ + return gen_logging_ops._print(input_, data, message, first_n, summarize, name) + + +# pylint: enable=invalid-name + + +def _generate_placeholder_string(x, default_placeholder="{}"): + """Generate and return a string that does not appear in `x`.""" + placeholder = default_placeholder + rng = random.Random(5) + while placeholder in x: + placeholder = placeholder + str(rng.randint(0, 9)) + return placeholder + + +def _is_filepath(output_stream): + """Returns True if output_stream is a file path.""" + return isinstance(output_stream, str) and output_stream.startswith("file://") + + +# Temporarily disable pylint g-doc-args error to allow giving more context +# about what the kwargs are. +# Because we are using arbitrary-length positional arguments, python 2 +# does not support explicitly specifying the keyword arguments in the +# function definition. +# pylint: disable=g-doc-args +@tf_export("print") +@dispatch.add_dispatch_support +def print_v2(*inputs, **kwargs): + """Print the specified inputs. + + A TensorFlow operator that prints the specified inputs to a desired + output stream or logging level. The inputs may be dense or sparse Tensors, + primitive python objects, data structures that contain tensors, and printable + Python objects. Printed tensors will recursively show the first and last + elements of each dimension to summarize. + + Example: + Single-input usage: + + ```python + tensor = tf.range(10) + tf.print(tensor, output_stream=sys.stderr) + ``` + + (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) + + Multi-input usage: + + ```python + tensor = tf.range(10) + tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout) + ``` + + (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 
14 16 18]}" to + sys.stdout) + + Changing the input separator: + ```python + tensor_a = tf.range(2) + tensor_b = tensor_a * 2 + tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',') + ``` + + (This prints "[0 1],[0 2]" to sys.stderr) + + Usage in a `tf.function`: + + ```python + @tf.function + def f(): + tensor = tf.range(10) + tf.print(tensor, output_stream=sys.stderr) + return tensor + + range_tensor = f() + ``` + + (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) + + *Compatibility usage in TF 1.x graphs*: + + In graphs manually created outside of `tf.function`, this method returns + the created TF operator that prints the data. To make sure the + operator runs, users need to pass the produced op to + `tf.compat.v1.Session`'s run method, or to use the op as a control + dependency for executed ops by specifying + `with tf.compat.v1.control_dependencies([print_op])`. + + ```python + tf.compat.v1.disable_v2_behavior() # for TF1 compatibility only + + sess = tf.compat.v1.Session() + with sess.as_default(): + tensor = tf.range(10) + print_op = tf.print("tensors:", tensor, {2: tensor * 2}, + output_stream=sys.stdout) + with tf.control_dependencies([print_op]): + tripled_tensor = tensor * 3 + + sess.run(tripled_tensor) + ``` + + (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to + sys.stdout) + + Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook + cell outputs. It will not write to the notebook kernel's console logs. + + Args: + *inputs: Positional arguments that are the inputs to print. Inputs in the + printed output will be separated by spaces. Inputs may be python + primitives, tensors, data structures such as dicts and lists that may + contain tensors (with the data structures possibly nested in arbitrary + ways), and printable python objects. + output_stream: The output stream, logging level, or file to print to. 
+ Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info, + tf.compat.v1.logging.warning, tf.compat.v1.logging.error, + absl.logging.info, absl.logging.warning and absl.logging.error are also + supported. To print to a file, pass a string started with "file://" + followed by the file path, e.g., "file:///tmp/foo.out". + summarize: The first and last `summarize` elements within each dimension are + recursively printed per Tensor. If None, then the first 3 and last 3 + elements of each dimension are printed for each tensor. If set to -1, it + will print all elements of every tensor. + sep: The string to use to separate the inputs. Defaults to " ". + end: End character that is appended at the end the printed string. Defaults + to the newline character. + name: A name for the operation (optional). + + Returns: + None when executing eagerly. During graph tracing this returns + a TF operator that prints the specified inputs in the specified output + stream or logging level. This operator will be automatically executed + except inside of `tf.compat.v1` graphs and sessions. + + Raises: + ValueError: If an unsupported output stream is specified. + """ + # Because we are using arbitrary-length positional arguments, python 2 + # does not support explicitly specifying the keyword arguments in the + # function definition. So, we manually get the keyword arguments w/ default + # values here. + output_stream = kwargs.pop("output_stream", sys.stderr) + name = kwargs.pop("name", None) + summarize = kwargs.pop("summarize", 3) + sep = kwargs.pop("sep", " ") + end = kwargs.pop("end", os.linesep) + if kwargs: + raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs) + format_name = None + if name: + format_name = name + "_format" + + # Match the C++ string constants representing the different output streams. + # Keep this updated! 
+ output_stream_to_constant = { + sys.stdout: "stdout", + sys.stderr: "stderr", + tf_logging.INFO: "log(info)", + tf_logging.info: "log(info)", + tf_logging.WARN: "log(warning)", + tf_logging.warning: "log(warning)", + tf_logging.warn: "log(warning)", + tf_logging.ERROR: "log(error)", + tf_logging.error: "log(error)", + logging.INFO: "log(info)", + logging.info: "log(info)", + logging.INFO: "log(info)", + logging.WARNING: "log(warning)", + logging.WARN: "log(warning)", + logging.warning: "log(warning)", + logging.warn: "log(warning)", + logging.ERROR: "log(error)", + logging.error: "log(error)", + } + + if _is_filepath(output_stream): + output_stream_string = output_stream + else: + output_stream_string = output_stream_to_constant.get(output_stream) + if not output_stream_string: + raise ValueError("Unsupported output stream, logging level, or file." + + str(output_stream) + + ". Supported streams are sys.stdout, " + "sys.stderr, tf.logging.info, " + "tf.logging.warning, tf.logging.error. " + + "File needs to be in the form of 'file://'.") + + # If we are only printing a single string scalar, there is no need to format + if (len(inputs) == 1 and tensor_util.is_tf_type(inputs[0]) and + (not isinstance(inputs[0], sparse_tensor.SparseTensor)) and + (inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)): + formatted_string = inputs[0] + # Otherwise, we construct an appropriate template for the tensors we are + # printing, and format the template using those tensors. + else: + # For each input to this print function, we extract any nested tensors, + # and construct an appropriate template to format representing the + # printed input. + templates = [] + tensors = [] + # If an input to the print function is of type `OrderedDict`, sort its + # elements by the keys for consistency with the ordering of `nest.flatten`. + # This is not needed for `dict` types because `pprint.pformat()` takes care + # of printing the template in a sorted fashion. 
+ inputs_ordered_dicts_sorted = [] + for input_ in inputs: + if isinstance(input_, py_collections.OrderedDict): + inputs_ordered_dicts_sorted.append( + py_collections.OrderedDict(sorted(input_.items()))) + else: + inputs_ordered_dicts_sorted.append(input_) + tensor_free_structure = nest.map_structure( + lambda x: "" if tensor_util.is_tf_type(x) else x, + inputs_ordered_dicts_sorted) + + tensor_free_template = " ".join( + pprint.pformat(x) for x in tensor_free_structure) + placeholder = _generate_placeholder_string(tensor_free_template) + + for input_ in inputs: + placeholders = [] + # Use the nest utilities to flatten & process any nested elements in this + # input. The placeholder for a tensor in the template should be the + # placeholder string, and the placeholder for a non-tensor can just be + # the printed value of the non-tensor itself. + for x in nest.flatten(input_): + # support sparse tensors + if isinstance(x, sparse_tensor.SparseTensor): + tensors.extend([x.indices, x.values, x.dense_shape]) + placeholders.append( + "SparseTensor(indices={}, values={}, shape={})".format( + placeholder, placeholder, placeholder)) + elif tensor_util.is_tf_type(x): + tensors.append(x) + placeholders.append(placeholder) + else: + placeholders.append(x) + + if isinstance(input_, str): + # If the current input to format/print is a normal string, that string + # can act as the template. + cur_template = input_ + else: + # We pack the placeholders into a data structure that matches the + # input data structure format, then format that data structure + # into a string template. + # + # NOTE: We must use pprint.pformat here for building the template for + # unordered data structures such as `dict`, because `str` doesn't + # guarantee orderings, while pprint prints in sorted order. pprint + # will match the ordering of `nest.flatten`. + # This even works when nest.flatten reorders OrderedDicts, because + # pprint is printing *after* the OrderedDicts have been reordered. 
+ cur_template = pprint.pformat( + nest.pack_sequence_as(input_, placeholders)) + templates.append(cur_template) + + # We join the templates for the various inputs into a single larger + # template. We also remove all quotes surrounding the placeholders, so that + # the formatted/printed output will not contain quotes around tensors. + # (example of where these quotes might appear: if we have added a + # placeholder string into a list, then pretty-formatted that list) + template = sep.join(templates) + template = template.replace("'" + placeholder + "'", placeholder) + formatted_string = string_ops.string_format( + inputs=tensors, + template=template, + placeholder=placeholder, + summarize=summarize, + name=format_name) + + return gen_logging_ops.print_v2( + formatted_string, output_stream=output_stream_string, name=name, end=end) + + +# pylint: enable=g-doc-args + + +@ops.RegisterGradient("Print") +def _PrintGrad(op, *grad): + return list(grad) + [None] * (len(op.inputs) - 1) + + +def _Collect(val, collections, default_collections): + if collections is None: + collections = default_collections + for key in collections: + ops.add_to_collection(key, val) + + +@deprecated( + "2016-11-30", "Please switch to tf.summary.histogram. Note that " + "tf.summary.histogram uses the node name instead of the tag. " + "This means that TensorFlow will automatically de-duplicate summary " + "names based on the scope they are created in.") +def histogram_summary(tag, values, collections=None, name=None): + # pylint: disable=line-too-long + """Outputs a `Summary` protocol buffer with a histogram. + + This ops is deprecated. Please switch to tf.summary.histogram. 
+ + For an explanation of why this op was deprecated, and information on how to + migrate, look + ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + + This op reports an `InvalidArgument` error if any value is not finite. + + Args: + tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. + values: A real numeric `Tensor`. Any shape. Values to use to build the + histogram. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope: + val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope) + _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) + return val + + +@deprecated( + "2016-11-30", "Please switch to tf.summary.image. Note that " + "tf.summary.image uses the node name instead of the tag. " + "This means that TensorFlow will automatically de-duplicate summary " + "names based on the scope they are created in. Also, the max_images " + "argument was renamed to max_outputs.") +def image_summary(tag, tensor, max_images=3, collections=None, name=None): + # pylint: disable=line-too-long + """Outputs a `Summary` protocol buffer with images. + + For an explanation of why this op was deprecated, and information on how to + migrate, look + ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) + + The summary has up to `max_images` summary values containing images. 
The + images are built from `tensor` which must be 4-D with shape `[batch_size, + height, width, channels]` and where `channels` can be: + + * 1: `tensor` is interpreted as Grayscale. + * 3: `tensor` is interpreted as RGB. + * 4: `tensor` is interpreted as RGBA. + + The images have the same number of channels as the input tensor. For float + input, the values are normalized one image at a time to fit in the range + `[0, 255]`. `uint8` values are unchanged. The op uses two different + normalization algorithms: + + * If the input values are all positive, they are rescaled so the largest one + is 255. + + * If any input value is negative, the values are shifted so input value 0.0 + is at 127. They are then rescaled so that either the smallest value is 0, + or the largest one is 255. + + The `tag` argument is a scalar `Tensor` of type `string`. It is used to + build the `tag` of the summary values: + + * If `max_images` is 1, the summary value tag is '*tag*/image'. + * If `max_images` is greater than 1, the summary value tags are + generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. + + Args: + tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the + summary values. + tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, + width, channels]` where `channels` is 1, 3, or 4. + max_images: Max number of batch elements to generate images for. + collections: Optional list of ops.GraphKeys. The collections to add the + summary to. Defaults to [ops.GraphKeys.SUMMARIES] + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope: + val = gen_logging_ops.image_summary( + tag=tag, tensor=tensor, max_images=max_images, name=scope) + _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) + return val + + +@deprecated( + "2016-11-30", "Please switch to tf.summary.audio. 
Note that " + "tf.summary.audio uses the node name instead of the tag. " + "This means that TensorFlow will automatically de-duplicate summary " + "names based on the scope they are created in.") +def audio_summary(tag, + tensor, + sample_rate, + max_outputs=3, + collections=None, + name=None): + # pylint: disable=line-too-long + """Outputs a `Summary` protocol buffer with audio. + + This op is deprecated. Please switch to tf.summary.audio. + For an explanation of why this op was deprecated, and information on how to + migrate, look + ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) + + The summary has up to `max_outputs` summary values containing audio. The + audio is built from `tensor` which must be 3-D with shape `[batch_size, + frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are + assumed to be in the range of `[-1.0, 1.0]` with a sample rate of + `sample_rate`. + + The `tag` argument is a scalar `Tensor` of type `string`. It is used to + build the `tag` of the summary values: + + * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. + * If `max_outputs` is greater than 1, the summary value tags are + generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. + + Args: + tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the + summary values. + tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` + or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. + sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the + signal in hertz. + max_outputs: Max number of batch elements to generate audio for. + collections: Optional list of ops.GraphKeys. The collections to add the + summary to. Defaults to [ops.GraphKeys.SUMMARIES] + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
+ """ + with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope: + sample_rate = ops.convert_to_tensor( + sample_rate, dtype=dtypes.float32, name="sample_rate") + val = gen_logging_ops.audio_summary_v2( + tag=tag, + tensor=tensor, + max_outputs=max_outputs, + sample_rate=sample_rate, + name=scope) + _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) + return val + + +@deprecated("2016-11-30", "Please switch to tf.summary.merge.") +def merge_summary(inputs, collections=None, name=None): + # pylint: disable=line-too-long + """Merges summaries. + + This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has + identical + behavior. + + This op creates a + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + protocol buffer that contains the union of all the values in the input + summaries. + + When the Op is run, it reports an `InvalidArgument` error if multiple values + in the summaries to merge use the same tag. + + Args: + inputs: A list of `string` `Tensor` objects containing serialized `Summary` + protocol buffers. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer resulting from the merging. + """ + with ops.name_scope(name, "MergeSummary", inputs): + val = gen_logging_ops.merge_summary(inputs=inputs, name=name) + _Collect(val, collections, []) + return val + + +@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.") +def merge_all_summaries(key=ops.GraphKeys.SUMMARIES): + """Merges all summaries collected in the default graph. + + This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which + has + identical behavior. + + Args: + key: `GraphKey` used to collect the summaries. Defaults to + `GraphKeys.SUMMARIES`. 
+ + Returns: + If no summaries were collected, returns None. Otherwise returns a scalar + `Tensor` of type `string` containing the serialized `Summary` protocol + buffer resulting from the merging. + """ + summary_ops = ops.get_collection(key) + if not summary_ops: + return None + else: + return merge_summary(summary_ops) + + +def get_summary_op(): + """Returns a single Summary op that would run all summaries. + + Either existing one from `SUMMARY_OP` collection or merges all existing + summaries. + + Returns: + If no summaries were collected, returns None. Otherwise returns a scalar + `Tensor` of type `string` containing the serialized `Summary` protocol + buffer resulting from the merging. + """ + summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP) + if summary_op is not None: + if summary_op: + summary_op = summary_op[0] + else: + summary_op = None + if summary_op is None: + summary_op = merge_all_summaries() + if summary_op is not None: + ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op) + return summary_op + + +@deprecated( + "2016-11-30", "Please switch to tf.summary.scalar. Note that " + "tf.summary.scalar uses the node name instead of the tag. " + "This means that TensorFlow will automatically de-duplicate summary " + "names based on the scope they are created in. Also, passing a " + "tensor or list of tags to a scalar summary op is no longer " + "supported.") +def scalar_summary(tags, values, collections=None, name=None): + # pylint: disable=line-too-long + """Outputs a `Summary` protocol buffer with scalar values. + + This ops is deprecated. Please switch to tf.summary.scalar. + For an explanation of why this op was deprecated, and information on how to + migrate, look + ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) + + The input `tags` and `values` must have the same shape. The generated + summary has a summary value for each tag-value pair in `tags` and `values`. 
+ + Args: + tags: A `string` `Tensor`. Tags for the summaries. + values: A real numeric Tensor. Values for the summaries. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope: + val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope) + _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) + return val + + +ops.NotDifferentiable("HistogramSummary") +ops.NotDifferentiable("ImageSummary") +ops.NotDifferentiable("AudioSummary") +ops.NotDifferentiable("AudioSummaryV2") +ops.NotDifferentiable("MergeSummary") +ops.NotDifferentiable("ScalarSummary") +ops.NotDifferentiable("TensorSummary") +ops.NotDifferentiable("TensorSummaryV2") +ops.NotDifferentiable("Timestamp") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/map_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/map_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d6e809b89a4d76797ede5f33f960c1e27cfdee --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/map_ops.py @@ -0,0 +1,72 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Ops to manipulate hashmap of tensors.""" + +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import gen_map_ops +from tensorflow.python.ops.gen_map_ops import * + +ops.NotDifferentiable("EmptyTensorMap") + +def empty_tensor_map(): + return gen_map_ops.empty_tensor_map() + +def tensor_map_size(input_handle): + return gen_map_ops.tensor_map_size(input_handle) + +def tensor_map_insert(input_handle, key, value): + return gen_map_ops.tensor_map_insert(input_handle, key, value) + +def tensor_map_lookup(input_handle, key, value_dtype): + return gen_map_ops.tensor_map_lookup(input_handle, key, value_dtype) + +def tensor_map_erase(input_handle, key, value_dtype): + return gen_map_ops.tensor_map_erase(input_handle, key, value_dtype) + +def tensor_map_has_key(input_handle, key): + return gen_map_ops.tensor_map_has_key(input_handle, key) + + +def tensor_map_stack_keys(input_handle, key_dtype): + return gen_map_ops.tensor_map_stack_keys(input_handle, key_dtype) + + +@ops.RegisterGradient("TensorMapLookup") +def LookupGrad(op, dval): + _, k = op.inputs + map_grad = empty_tensor_map() + map_grad = tensor_map_insert(map_grad, k, dval) + key_grad = None + return map_grad, key_grad + +@ops.RegisterGradient("TensorMapInsert") +def InsertGrad(op, dmap): + _, k, v = op.inputs + key_grad = None + (value_grad, map_grad) = cond.cond( + tensor_map_has_key(dmap, k), lambda: + (tensor_map_lookup(dmap, k, v.dtype), tensor_map_erase(dmap, k, v.dtype)), + lambda: (array_ops.zeros_like(v), dmap)) + return map_grad, key_grad, value_grad + +@ops.RegisterGradient("TensorMapErase") +def EraseGrad(op, dmap): + key_grad = None + map_grad = dmap + return map_grad, key_grad diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_grad.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe7047cbd42efd92e73b35c2b95ee5524882934 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/math_grad.py @@ -0,0 +1,2047 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gradients for operators defined in math_ops.py.""" +import numpy as np + +from tensorflow.python.compat import compat +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices as indexed_slices_lib +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import special_math_ops + + +@ops.RegisterGradient("ArgMax") +def _ArgMaxGrad(op: ops.Operation, grad): + del op, grad + return [None, None] + + +@ops.RegisterGradient("ArgMin") +def _ArgMinGrad(op: ops.Operation, 
grad): + del op, grad + return [None, None] + + +@ops.RegisterGradient("EuclideanNorm") +def _EuclideanNormGrad(op: ops.Operation, grad): + """Gradient for EuclideanNorm.""" + + output = op.outputs[0] + + if not op.get_attr("keep_dims"): + output_shape_kept_dims = math_ops.reduced_shape( + array_ops.shape(op.inputs[0]), op.inputs[1]) + output = array_ops.reshape(output, output_shape_kept_dims) + grad = array_ops.reshape(grad, output_shape_kept_dims) + + return math_ops.truediv(op.inputs[0], output / grad), None + + +def SmartBroadcastGradientArgs(x, y, grad=None): + """Version of `BroadcastGradientArgs` optimized for partially-known shapes. + + Args: + x: The first argument of a broadcasting binary op. + y: The second argument of a broadcasting binary op. + grad: Deprecated. + + Returns: + A pair of triples, one per argument with + * Shape of the argument (tensor); + * Reduction axes for the argument (list or tensor); + * Boolean indicating whether the reduction must be applied. + """ + del grad + x_shape = array_ops.shape(x) + y_shape = array_ops.shape(y) + + if (not context.executing_eagerly() and + isinstance(x, tensor.Tensor) and + isinstance(y, tensor.Tensor)): + x_axes, y_axes = _InferGradientReductionAxes(x.shape, y.shape) + else: + x_axes, y_axes = None, None + + if x_axes is None or y_axes is None: + # NOTE: In graph mode, this is never exercised for statically known shapes. 
+ x_axes, y_axes = gen_array_ops.broadcast_gradient_args(x_shape, y_shape) + x_must_reduce = True + y_must_reduce = True + else: + x_must_reduce = x_axes or x.shape.rank < y.shape.rank + y_must_reduce = y_axes or y.shape.rank < x.shape.rank + + return (x_shape, x_axes, x_must_reduce), (y_shape, y_axes, y_must_reduce) + + +def _InferGradientReductionAxes(x_shape, y_shape): + """Infers the sets of axes that might have been broadcasted.""" + x_rank = x_shape.rank + y_rank = y_shape.rank + if x_rank is None or y_rank is None: + return None, None + + # Convert shapes for V1 compatibility, can be omitted in V2. + x_shape = x_shape.as_list() + y_shape = y_shape.as_list() + + b_rank = max(x_rank, y_rank) + x_axes = [] + y_axes = [] + for axis in range(b_rank): + x_dim = 1 if axis < b_rank - x_rank else x_shape[axis - (b_rank - x_rank)] + y_dim = 1 if axis < b_rank - y_rank else y_shape[axis - (b_rank - y_rank)] + if x_dim == 1 and y_dim != 1: + # It's safe to assume that x_dim was broadcasted. + x_axes.append(axis) + elif y_dim == 1 and x_dim != 1: + # It's safe to assume that y_dim was broadcasted. + y_axes.append(axis) + elif x_dim is None or y_dim is None: + # Broadcasting decision is dynamic (data-dependent). + return None, None + + return x_axes, y_axes + + +def _ReduceGradientArg(grad, shape_axes_must_reduce): + """Reduces gradients of one of the arguments of a broadcasting binary op.""" + shape, axes, must_reduce = shape_axes_must_reduce + if grad is not None and must_reduce: + # Applying keepdims=True in presence of unknown axes opens up some + # opportunities for optimizations. For example, _SumGrad below won't have to + # emit extra ops to recover reduced indices for broadcasting. 
+ grad = math_ops.reduce_sum(grad, axes, keepdims=True) + grad = array_ops.reshape(grad, shape) + return grad + + +def _ReduceGradientArgs(x, y, gx, gy): + """Reduces gradients of both arguments of a broadcasting binary op.""" + if gx is not None or gy is not None: + bx, by = SmartBroadcastGradientArgs(x, y) + gx = _ReduceGradientArg(gx, bx) + gy = _ReduceGradientArg(gy, by) + return gx, gy + + +_EMPTY_TUPLE = () + + +def _IsScalar(x): + return x._shape_tuple() is _EMPTY_TUPLE # pylint: disable=protected-access + + +def _SafeShapeDiv(x, y): + """Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.""" + return x // math_ops.maximum(y, 1) + + +@ops.RegisterGradient("Sum") +def _SumGrad(op: ops.Operation, grad): + """Gradient for Sum.""" + # Fast path for when reducing to a scalar and ndims is known: adds only + # Reshape and Tile ops (and possibly a Shape). + input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access + if input_0_shape is not None: + axes = tensor_util.constant_value(op.inputs[1]) + if axes is not None: + rank = len(input_0_shape) + if np.array_equal(axes, np.arange(rank)): # Reduce all dims. + if context.executing_eagerly(): + ctx = context.context() + new_shape = ctx.ones_rank_cache().get(rank) + if new_shape is None: + new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32) + ctx.ones_rank_cache().put(rank, new_shape) + else: + new_shape = [1] * rank + grad = array_ops.reshape(grad, new_shape) + # If shape is not fully defined (but rank is), we use Shape. + if None not in input_0_shape: + input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32) + else: + input_shape = array_ops.shape(op.inputs[0]) + return [array_ops.tile(grad, input_shape), None] + elif None not in input_0_shape and not context.executing_eagerly(): + # The shape and reduction indices are statically known, so we use a + # graph-level cache to avoid recomputing `reduced_shape()` for each + # invocation. 
+ graph = ops.get_default_graph() + + # Canonicalize `axes` to be a tuple of indices. The incoming + # value may be a scalar or a vector, and may include negative indices. + axes = tuple(axes.reshape(-1)) + + try: + output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access + (input_0_shape, axes)] + except KeyError: + + # Compute and cache `output_shape_kept_dims` and `tile_scaling`. + def EvaluateAsTuple(t): + if tensor_util.is_tf_type(t): + value = tensor_util.try_evaluate_constant(t) + assert value is not None + else: + value = t + return tuple(value) + + output_shape_kept_dims = EvaluateAsTuple( + math_ops.reduced_shape(input_0_shape, axes)) + tile_scaling = EvaluateAsTuple( + _SafeShapeDiv(input_0_shape, output_shape_kept_dims)) + graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access + output_shape_kept_dims, tile_scaling) + + grad = array_ops.reshape(grad, output_shape_kept_dims) + return [array_ops.tile(grad, tile_scaling), None] + + input_shape = array_ops.shape(op.inputs[0]) + + if not op.get_attr("keep_dims"): + with ops.colocate_with(input_shape): + # TODO(apassos) remove this once device placement for eager ops makes + # more sense. + output_shape_kept_dims = math_ops.reduced_shape(input_shape, + op.inputs[1]) + grad = array_ops.reshape(grad, output_shape_kept_dims) + return [array_ops.broadcast_to(grad, input_shape), None] + + +def _MinOrMaxGrad(op: ops.Operation, grad): + """Gradient for Min or Max. Amazingly it's precisely the same code.""" + input_shape = array_ops.shape(op.inputs[0]) + y = op.outputs[0] + if not op.get_attr("keep_dims"): + output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1]) + y = array_ops.reshape(y, output_shape_kept_dims) + grad = array_ops.reshape(grad, output_shape_kept_dims) + else: + output_shape_kept_dims = array_ops.shape(y) + + # Compute the number of selected (maximum or minimum) elements in each + # reduction dimension. 
If there are multiple minimum or maximum elements + # then the gradient will be divided between them. + indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype) + num_selected = array_ops.reshape( + math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims) + + return [math_ops.divide(indicators, num_selected) * grad, None] + + +@ops.RegisterGradient("Max") +def _MaxGrad(op: ops.Operation, grad): + """Gradient for Max.""" + return _MinOrMaxGrad(op, grad) + + +@ops.RegisterGradient("Min") +def _MinGrad(op: ops.Operation, grad): + return _MinOrMaxGrad(op, grad) + + +@ops.RegisterGradient("Mean") +def _MeanGrad(op: ops.Operation, grad): + """Gradient for Mean.""" + sum_grad = _SumGrad(op, grad)[0] + input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access + output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access + if (input_shape is not None and output_shape is not None and + None not in input_shape and None not in output_shape): + input_size = np.prod(input_shape) + output_size = np.prod(output_shape) + factor = input_size // max(output_size, 1) + factor = constant_op.constant(factor, dtype=sum_grad.dtype) + else: + input_shape = array_ops.shape(op.inputs[0]) + input_rank = array_ops.size(input_shape) + axes = (op.inputs[1] + input_rank) % input_rank + factor = math_ops.reduce_prod(array_ops.gather(input_shape, axes)) + return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None + + +@ops.RegisterGradient("Prod") +def _ProdGrad(op: ops.Operation, grad): + """Gradient for Prod.""" + # The gradient can be expressed by dividing the product by each entry of the + # input tensor, but this approach can't deal with zeros in the input. + # Here, we avoid this problem by composing the output as a product of two + # cumprod operations. 
+ + input_shape = array_ops.shape(op.inputs[0]) + # Reshape reduction indices for the case where the parameter is a scalar + reduction_indices = array_ops.reshape(op.inputs[1], [-1]) + + # Expand grad to full input shape + if not op.get_attr("keep_dims"): + output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1]) + grad = array_ops.reshape(grad, output_shape_kept_dims) + + grad = array_ops.broadcast_to(grad, input_shape) + + # Pack all reduced dimensions into a single one, so we can perform the + # cumprod ops. If the reduction dims list is empty, it defaults to float32, + # so we need to cast here. We put all the shape-related ops on CPU to avoid + # copying back and forth, and since listdiff is CPU only. + with ops.device("/cpu:0"): + rank = array_ops.rank(op.inputs[0]) + reduction_indices = (reduction_indices + rank) % rank + reduced = math_ops.cast(reduction_indices, dtypes.int32) + idx = math_ops.range(0, rank) + other, _ = gen_array_ops.list_diff(idx, reduced, dtypes.int32) + perm = array_ops.concat([reduced, other], 0) + reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced)) + other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other)) + permuted = array_ops.transpose(op.inputs[0], perm) + permuted_shape = array_ops.shape(permuted) + reshaped = array_ops.reshape(permuted, (reduced_num, other_num)) + + # Calculate product, leaving out the current entry + left = math_ops.cumprod(reshaped, axis=0, exclusive=True) + right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True) + # For complex inputs, the gradient is in the conjugate direction. + y = array_ops.reshape( + math_ops.conj(left) * math_ops.conj(right), permuted_shape) + + # Invert the transpose and reshape operations. + # Make sure to set the statically known shape information through a reshape. 
+ out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm)) + return array_ops.reshape(out, input_shape), None + + +@ops.RegisterGradient("SegmentSum") +def _SegmentSumGrad(op: ops.Operation, grad): + """Gradient for SegmentSum.""" + return array_ops.gather(grad, op.inputs[1]), None + + +@ops.RegisterGradient("SegmentMean") +def _SegmentMeanGrad(op: ops.Operation, grad): + """Gradient for SegmentMean.""" + input_rank = array_ops.rank(op.inputs[0]) + ones_shape = array_ops.concat([ + array_ops.shape(op.inputs[1]), + array_ops.ones( + array_ops.expand_dims(input_rank - 1, 0), dtype=dtypes.int32) + ], 0) + ones = array_ops.ones(ones_shape, dtype=grad.dtype) + scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1])) + return array_ops.gather(scaled_grad, op.inputs[1]), None + + +def _SparseSegmentReduceGradV2(op, grad, norm=None): + """Sparse gradient for SparseSegment(Sum|Mean|SqrtN)[WithNumSegments].""" + assert norm is None or norm == "mean" or norm == "sqrtn" + data = op.inputs[0] + indices = op.inputs[1] + segment_ids = op.inputs[2] + data_shape = array_ops.shape(op.inputs[0]) + dense_output_dim0 = data_shape[0] + grad_fn = ( + math_ops.sparse_segment_mean_grad_v2 + if norm == "mean" + else math_ops.sparse_segment_sqrt_n_grad_v2 + if norm == "sqrtn" + else math_ops.sparse_segment_sum_grad_v2 + ) + grad_values, sorted_unique_indices = grad_fn( + grad, indices, segment_ids, dense_output_dim0 + ) + return indexed_slices_lib.IndexedSlices( + grad_values, sorted_unique_indices, data_shape + ) + + +def _GetOpAttrOrNone(op, name): + """Returns the value of the attr of `op` with the given `name`, or None if no + + such attr exists. 
+ """ + try: + return op.get_attr(name) + except ValueError: + return None + + +@ops.RegisterGradient("SparseSegmentSum") +def _SparseSegmentSumGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentSum.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad), None, None + dim0 = array_ops.shape(op.inputs[0])[0] + if compat.forward_compatible(2021, 6, 10): + return (math_ops.sparse_segment_sum_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None) + else: + return (math_ops.unsorted_segment_sum( + array_ops.gather(grad, op.inputs[2]), op.inputs[1], dim0), None, None) + + +@ops.RegisterGradient("SparseSegmentSumWithNumSegments") +def _SparseSegmentSumWithNumSegmentsGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentSumWithNumSegments.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad), None, None, None + dim0 = array_ops.shape(op.inputs[0])[0] + if compat.forward_compatible(2021, 6, 10): + return (math_ops.sparse_segment_sum_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None, None) + else: + return (math_ops.unsorted_segment_sum( + array_ops.gather(grad, op.inputs[2]), op.inputs[1], + dim0), None, None, None) + + +@ops.RegisterGradient("SparseSegmentMean") +def _SparseSegmentMeanGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentMean.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad, "mean"), None, None + dim0 = array_ops.shape(op.inputs[0])[0] + return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None) + + +@ops.RegisterGradient("SparseSegmentMeanWithNumSegments") +def _SparseSegmentMeanWithNumSegmentsGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentMeanWithNumSegments.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad, "mean"), None, None, None + dim0 = array_ops.shape(op.inputs[0])[0] + return 
(math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None, None) + + +@ops.RegisterGradient("SparseSegmentSqrtN") +def _SparseSegmentSqrtNGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentSqrtN.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad, "sqrtn"), None, None + dim0 = array_ops.shape(op.inputs[0])[0] + return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None) + + +@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments") +def _SparseSegmentSqrtNWithNumSegmentsGrad(op: ops.Operation, grad): + """Gradient for SparseSegmentSqrtNWithNumSegments.""" + if _GetOpAttrOrNone(op, "sparse_gradient"): + return _SparseSegmentReduceGradV2(op, grad, "sqrtn"), None, None, None + dim0 = array_ops.shape(op.inputs[0])[0] + return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], + dim0), None, None, None) + + +def _SegmentMinOrMaxGrad(op: ops.Operation, grad): + """ Gradient for SegmentMin and SegmentMax. """ + zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype) + # Get the number of selected (minimum or maximum) elements in each segment. + gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1]) + is_selected = math_ops.equal(op.inputs[0], gathered_outputs) + num_selected = math_ops.segment_sum( + math_ops.cast(is_selected, grad.dtype), op.inputs[1]) + # Compute the gradient for each segment. The gradient for the ith segment is + # divided evenly among the selected elements in that segment. 
+ weighted_grads = math_ops.divide(grad, num_selected) + gathered_grads = array_ops.gather(weighted_grads, op.inputs[1]) + return array_ops.where_v2(is_selected, gathered_grads, zeros), None + + +@ops.RegisterGradient("SegmentMin") +def _SegmentMinGrad(op: ops.Operation, grad): + """Gradient for SegmentMin.""" + return _SegmentMinOrMaxGrad(op, grad) + + +@ops.RegisterGradient("SegmentMax") +def _SegmentMaxGrad(op: ops.Operation, grad): + """Gradient for SegmentMax.""" + return _SegmentMinOrMaxGrad(op, grad) + + +# pylint: disable=g-doc-args +@ops.RegisterGradient("SegmentProd") +def _SegmentProdGrad(op: ops.Operation, grad): + """Gradient for SegmentProd. + + The gradient can be expressed for each segment by dividing the segment's + product by each element of the segment input tensor, but this approach can't + deal with zeros in the input. + Unlike reduce_prod we can't use cumsum here as individual segments may have + a different number of elements. Therefore we consider three cases: + 1) A segment input contains no zeros and we can safely divide by the input + tensor. + 2) A segment contains exactly one zero. Then the gradient of each input of + the segment is zero except for the 0-input, there the gradient is + the product of the remaining segment entries. + 3) A segment contains at least two zeros. The gradient is zero for all + segment inputs. 
+ """ + data = op.inputs[0] + segment_ids = op.inputs[1] + is_zero = math_ops.equal(data, 0) + num_zeros = gen_math_ops.segment_sum( + math_ops.cast(is_zero, dtype=dtypes.int32), segment_ids) + # handle case 3 and set the gradient to 0 for segments with more than one + # 0 as input + grad = array_ops.where_v2( + math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad) + # replace all zeros with ones and compute the segment_prod + non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(data), data) + non_zero_prod = gen_math_ops.segment_prod(non_zero_data, segment_ids) + gathered_prod = array_ops.gather(op.outputs[0], segment_ids) + gathered_non_zero_prod = array_ops.gather(non_zero_prod, segment_ids) + prod_divided_by_el = gathered_prod / non_zero_data + # Now fetch the individual results for segments containing 0 and those that + # don't. + partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod, + prod_divided_by_el) + gathered_grad = array_ops.gather(grad, segment_ids) + return gathered_grad * partial_derivative, None + + +def _GatherDropNegatives(params, + ids, + zero_clipped_indices=None, + is_positive=None): + """ Helper function for unsorted segment ops. + + Gathers params for + positive segment ids and gathers 0 for inputs with negative segment id. + Also returns the clipped indices and a boolean mask with the same shape + as ids where a positive id is masked as true. With this, the latter two + can be passed as arguments to this function to reuse them. + """ + if zero_clipped_indices is None: + zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids)) + gathered = array_ops.gather(params, zero_clipped_indices) + if is_positive is None: + is_positive = math_ops.greater_equal(ids, 0) + # tf.where(condition, x, y) requires condition to have the same shape as x + # and y. 
+ is_positive_shape = array_ops.shape(is_positive) + broadcastable_shape = array_ops.concat( + [ + is_positive_shape, + array_ops.ones( + [array_ops.rank(gathered) - array_ops.rank(is_positive)], + dtype=is_positive_shape.dtype, + ), + ], + axis=0, + ) + is_positive = array_ops.reshape(is_positive, broadcastable_shape) + is_positive = is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool) + # replace gathered params of negative indices with 0 + zero_slice = array_ops.zeros_like(gathered) + return ( + array_ops.where_v2(is_positive, gathered, zero_slice), + zero_clipped_indices, + is_positive, + ) + + +def _UnsortedSegmentMinOrMaxGrad(op: ops.Operation, grad): + """Gradient for UnsortedSegmentMin and UnsortedSegmentMax.""" + # Get the number of selected (minimum or maximum) elements in each segment. + gathered_outputs, zero_clipped_indices, is_positive = _GatherDropNegatives( + op.outputs[0], op.inputs[1] + ) + is_selected = math_ops.equal(op.inputs[0], gathered_outputs) + is_selected = math_ops.logical_and(is_selected, is_positive) + num_selected = math_ops.unsorted_segment_sum( + math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2] + ) + # Compute the gradient for each segment. The gradient for the ith segment is + # divided evenly among the selected elements in that segment. + weighted_grads = math_ops.divide(grad, num_selected) + gathered_grads, _, _ = _GatherDropNegatives( + weighted_grads, None, zero_clipped_indices, is_positive + ) + zeros = array_ops.zeros_like(gathered_grads) + return array_ops.where_v2(is_selected, gathered_grads, zeros), None, None + + +@ops.RegisterGradient("UnsortedSegmentSum") +def _UnsortedSegmentSumGrad(op: ops.Operation, grad): + """Gradient for UnsortedSegmentSum.""" + return _GatherDropNegatives(grad, op.inputs[1])[0], None, None + + +@ops.RegisterGradient("UnsortedSegmentMax") +def _UnsortedSegmentMaxGrad(op: ops.Operation, grad): + """ Gradient for UnsortedSegmentMax. 
""" + return _UnsortedSegmentMinOrMaxGrad(op, grad) + + +@ops.RegisterGradient("UnsortedSegmentMin") +def _UnsortedSegmentMinGrad(op: ops.Operation, grad): + """ Gradient for UnsortedSegmentMin. """ + return _UnsortedSegmentMinOrMaxGrad(op, grad) + + +@ops.RegisterGradient("UnsortedSegmentProd") +def _UnsortedSegmentProdGrad(op: ops.Operation, grad): + """ Gradient for UnsortedSegmentProd. + + The gradient can be expressed for each segment by dividing the segment's + product by each element of the segment input tensor, but this approach can't + deal with zeros in the input. + Unlike reduce_prod we can't use cumsum here as individual segments may have + a different number of elements. Therefore we consider three cases: + 1) A segment input contains no zeros and we can safely divide by the input + tensor. + 2) A segment contains exactly one zero. Then the gradient of each input of + the segment is zero except for the 0-input, there the gradient is + the product of the remaining segment entries. + 3) A segment contains at least two zeros. The gradient is zero for all + segment inputs. 
+ """ + # Note that unsorted_segment_sum will filter out the negative indices, + # so we don't need to do a logical_and with is_positive here + is_zero = math_ops.equal(op.inputs[0], 0) + num_zeros = gen_math_ops.unsorted_segment_sum( + math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2]) + # handle case 3 and set the gradient to 0 for segments with more than one + # 0 as input + grad = array_ops.where_v2( + math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad) + # replace all zeros with ones and compute the unsorted_segment_prod + non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(op.inputs[0]), + op.inputs[0]) + non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data, + op.inputs[1], op.inputs[2]) + # clip the indices for gather to be positive + zero_clipped_indices = math_ops.maximum(op.inputs[1], + array_ops.zeros_like(op.inputs[1])) + gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices) + gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices) + prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf. + # Now fetch the individual results for segments containing 0 and those that + # don't. 
is_zero will also fetch results for entries with negative index + # but the following gather_drop_negatives sets the corresponding entry in + # grad to 0 for these + partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod, + prod_divided_by_el) + gathered_grad = _GatherDropNegatives(grad, op.inputs[1], + zero_clipped_indices)[0] + return gathered_grad * partial_derivative, None, None + + +@ops.RegisterGradient("Abs") +def _AbsGrad(op: ops.Operation, grad): + x = op.inputs[0] + return grad * math_ops.sign(x) + + +@ops.RegisterGradient("Neg") +def _NegGrad(_, grad): + """Returns -grad.""" + return -grad + + +@ops.RegisterGradient("Inv") +def _InvGrad(op: ops.Operation, grad): + """Returns -grad * (1 / x^2).""" + y = op.outputs[0] # y = 1 / x + return gen_math_ops.reciprocal_grad(y, grad) + + +@ops.RegisterGradient("Reciprocal") +def _ReciprocalGrad(op: ops.Operation, grad): + """Returns -grad * (1 / x^2).""" + y = op.outputs[0] # y = 1 / x + return gen_math_ops.reciprocal_grad(y, grad) + + +@ops.RegisterGradient("InvGrad") +def _InvGradGrad(op: ops.Operation, grad): + b = op.inputs[1] + # op.output[0]: y = -b * conj(a)^2 + with ops.control_dependencies([grad]): + ca = math_ops.conj(op.inputs[0]) + cg = math_ops.conj(grad) + return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad) + + +@ops.RegisterGradient("ReciprocalGrad") +def _ReciprocalGradGrad(op: ops.Operation, grad): + b = op.inputs[1] + # op.output[0]: y = -b * conj(a)^2 + with ops.control_dependencies([grad]): + ca = math_ops.conj(op.inputs[0]) + cg = math_ops.conj(grad) + return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad) + + +@ops.RegisterGradient("Square") +def _SquareGrad(op: ops.Operation, grad): + x = op.inputs[0] + # Added control dependencies to prevent 2*x from being computed too early. 
+ with ops.control_dependencies([grad]): + x = math_ops.conj(x) + y = constant_op.constant(2.0, dtype=x.dtype) + return math_ops.multiply(grad, math_ops.multiply(x, y)) + + +@ops.RegisterGradient("Sqrt") +def _SqrtGrad(op: ops.Operation, grad): + y = op.outputs[0] # y = x^(1/2) + return gen_math_ops.sqrt_grad(y, grad) + + +@ops.RegisterGradient("SqrtGrad") +def _SqrtGradGrad(op: ops.Operation, grad): + a = op.inputs[0] + y = op.outputs[0] # y = 0.5 * b / conj(a) + with ops.control_dependencies([grad]): + ga = grad / a + return -math_ops.conj(ga) * y, 0.5 * ga # pylint: disable=invalid-unary-operand-type + + +@ops.RegisterGradient("Rsqrt") +def _RsqrtGrad(op: ops.Operation, grad): + """Returns -0.5 * grad * conj(y)^3.""" + y = op.outputs[0] # y = x^(-1/2) + return gen_math_ops.rsqrt_grad(y, grad) + + +@ops.RegisterGradient("RsqrtGrad") +def _RsqrtGradGrad(op: ops.Operation, grad): + """Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3.""" + a = op.inputs[0] # a = x^{-1/2} + b = op.inputs[1] # backprop gradient for a + with ops.control_dependencies([grad]): + ca = math_ops.conj(a) + cg = math_ops.conj(grad) + grad_a = -1.5 * cg * b * math_ops.square(ca) + grad_b = gen_math_ops.rsqrt_grad(ca, grad) + return grad_a, grad_b + + +@ops.RegisterGradient("Exp") +def _ExpGrad(op: ops.Operation, grad): + """Returns grad * exp(x).""" + y = op.outputs[0] # y = e^x + with ops.control_dependencies([grad]): + y = math_ops.conj(y) + return grad * y + + +@ops.RegisterGradient("Expm1") +def _Expm1Grad(op: ops.Operation, grad): + """Returns grad * exp(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + y = math_ops.exp(x) + return grad * y + + +@ops.RegisterGradient("Log") +def _LogGrad(op: ops.Operation, grad): + """Returns grad * (1/x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.reciprocal(x) + + +@ops.RegisterGradient("Log1p") +def _Log1pGrad(op: ops.Operation, 
grad): + """Returns grad * (1/(1 + x)).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.reciprocal(1 + x) + + +@ops.RegisterGradient("Xlogy") +def _XLogyGrad(op: ops.Operation, grad): + """Returns gradient of xlogy(x, y) with respect to x and y.""" + x = op.inputs[0] + y = op.inputs[1] + sx = array_ops.shape(x) + sy = array_ops.shape(y) + rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) + with ops.control_dependencies([grad]): + not_zero_x = math_ops.cast( + math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype) + partial_x = gen_math_ops.xlogy(not_zero_x, y) + partial_y = gen_math_ops.xdivy(x, y) + return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), + array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy)) + + +@ops.RegisterGradient("Xlog1py") +def _XLog1pyGrad(op: ops.Operation, grad): + """Returns gradient of xlog1py(x, y) with respect to x and y.""" + x = op.inputs[0] + y = op.inputs[1] + sx = array_ops.shape(x) + sy = array_ops.shape(y) + rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) + with ops.control_dependencies([grad]): + not_zero_x = math_ops.cast( + math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype) + partial_x = gen_math_ops.xlog1py(not_zero_x, y) + partial_y = gen_math_ops.xdivy(x, y + 1.) 
+ return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), + array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy)) + + +@ops.RegisterGradient("Xdivy") +def _XDivyGrad(op: ops.Operation, grad): + """Returns gradient of xdivy(x, y) with respect to x and y.""" + x = op.inputs[0] + y = op.inputs[1] + sx = array_ops.shape(x) + sy = array_ops.shape(y) + rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) + with ops.control_dependencies([grad]): + not_zero_x = math_ops.cast( + math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype) + partial_x = gen_math_ops.xdivy(not_zero_x, y) + partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2) + return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), + array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy)) + + +@ops.RegisterGradient("Sinh") +def _SinhGrad(op: ops.Operation, grad): + """Returns grad * cosh(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.cosh(x) + + +@ops.RegisterGradient("Cosh") +def _CoshGrad(op: ops.Operation, grad): + """Returns grad * sinh(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.sinh(x) + + +@ops.RegisterGradient("Tanh") +def _TanhGrad(op: ops.Operation, grad): + """Returns grad * (1 - tanh(x) * tanh(x)).""" + y = op.outputs[0] # y = tanh(x) + with ops.control_dependencies([grad]): + y = math_ops.conj(y) + return gen_math_ops.tanh_grad(y, grad) + + +@ops.RegisterGradient("Asinh") +def _AsinhGrad(op: ops.Operation, grad): + """Returns grad * 1/cosh(y).""" + y = op.outputs[0] + with ops.control_dependencies([grad]): + y = math_ops.conj(y) + return grad / math_ops.cosh(y) + + +@ops.RegisterGradient("Acosh") +def _AcoshGrad(op: ops.Operation, grad): + """Returns grad * 1/sinh(y).""" + y = op.outputs[0] + with ops.control_dependencies([grad]): + y = math_ops.conj(y) + return grad / 
math_ops.sinh(y) + + +@ops.RegisterGradient("Atanh") +def _AtanhGrad(op: ops.Operation, grad): + """Returns grad * 1/ (1 - x^2).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + x2 = math_ops.square(x) + one = constant_op.constant(1, dtype=grad.dtype) + inv = math_ops.reciprocal(math_ops.subtract(one, x2)) + return grad * inv + + +@ops.RegisterGradient("TanhGrad") +def _TanhGradGrad(op: ops.Operation, grad): + with ops.control_dependencies([grad]): + a = math_ops.conj(op.inputs[0]) + b = math_ops.conj(op.inputs[1]) + return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad) + + +@ops.RegisterGradient("Erf") +def _ErfGrad(op: ops.Operation, grad): + """Returns grad * 2/sqrt(pi) * exp(-x**2).""" + x = op.inputs[0] + two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype) + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x)) + + +@ops.RegisterGradient("Erfc") +def _ErfcGrad(op: ops.Operation, grad): + """Returns -grad * 2/sqrt(pi) * exp(-x**2).""" + x = op.inputs[0] + minus_two_over_root_pi = constant_op.constant( + -2 / np.sqrt(np.pi), dtype=grad.dtype) + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) + + +@ops.RegisterGradient("Erfinv") +def _ErfinvGrad(op: ops.Operation, grad): + """Returns grad * sqrt(pi) / 2 * exp(erfinv(x)**2).""" + root_pi_over_two = constant_op.constant(np.sqrt(np.pi) / 2, dtype=grad.dtype) + with ops.control_dependencies([grad]): + return grad * root_pi_over_two * math_ops.exp( + math_ops.square(op.outputs[0])) + + +@ops.RegisterGradient("Ndtri") +def _NdtriGrad(op: ops.Operation, grad): + """Returns grad * sqrt(2 * pi) * exp(ndtri(x)**2 / 2).""" + root_two_pi = constant_op.constant(np.sqrt(2 * np.pi), dtype=grad.dtype) + with ops.control_dependencies([grad]): + return grad * root_two_pi * math_ops.exp( + 
math_ops.square(op.outputs[0]) / 2.) + + +@ops.RegisterGradient("Lgamma") +def _LgammaGrad(op: ops.Operation, grad): + """Returns grad * digamma(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.digamma(x) + + +@ops.RegisterGradient("Digamma") +def _DigammaGrad(op: ops.Operation, grad): + """Compute gradient of the digamma function with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) + return grad * partial_x + + +@ops.RegisterGradient("Dawsn") +def _DawsnGrad(op: ops.Operation, grad): + """Compute gradient of dawsn(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + return grad * (1. - 2 * x * y) + + +@ops.RegisterGradient("Expint") +def _ExpintGrad(op: ops.Operation, grad): + """Compute gradient of expint(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + return grad * math_ops.exp(x) / x + + +@ops.RegisterGradient("FresnelCos") +def _FresnelCosGrad(op: ops.Operation, grad): + """Compute gradient of fresnel_cos(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + return grad * math_ops.cos((np.pi / 2.) * math_ops.square(x)) + + +@ops.RegisterGradient("FresnelSin") +def _FresnelSinGrad(op: ops.Operation, grad): + """Compute gradient of fresnel_sin(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + return grad * math_ops.sin((np.pi / 2.) 
* math_ops.square(x)) + + +@ops.RegisterGradient("Spence") +def _SpenceGrad(op: ops.Operation, grad): + """Compute gradient of spence(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + partial_x = math_ops.log(x) / (1 - x) + partial_x = array_ops.where( + math_ops.equal(x, 1.), -array_ops.ones_like(x), partial_x) # pylint: disable=invalid-unary-operand-type + return grad * partial_x + + +@ops.RegisterGradient("BesselI0") +def _BesselI0Grad(op: ops.Operation, grad): + """Compute gradient of bessel_i0(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + partial_x = special_math_ops.bessel_i1(x) + return grad * partial_x + + +@ops.RegisterGradient("BesselI0e") +def _BesselI0eGrad(op: ops.Operation, grad): + """Compute gradient of bessel_i0e(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + partial_x = (special_math_ops.bessel_i1e(x) - math_ops.sign(x) * y) + return grad * partial_x + + +@ops.RegisterGradient("BesselI1") +def _BesselI1Grad(op: ops.Operation, grad): + """Compute gradient of bessel_i1(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # For x = 0, the correct gradient is 1.0. + # However, the main branch gives NaN because of the division by x, so + # we impute the gradient manually. + # An alternative solution is to express the gradient via bessel_i0 and + # bessel_i2, but the latter is not yet implemented in Eigen. 
+ dy_dx = array_ops.where_v2( + math_ops.equal(x, 0.), math_ops.cast(1., x.dtype), + special_math_ops.bessel_i0(x) - math_ops.div(y, x)) + return grad * dy_dx + + +@ops.RegisterGradient("BesselI1e") +def _BesselI1eGrad(op: ops.Operation, grad): + """Compute gradient of bessel_i1e(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # For x = 0, the correct gradient is 0.5. + # However, the main branch gives NaN because of the division by x, so + # we impute the gradient manually. + # An alternative solution is to express the gradient via bessel_i0e and + # bessel_i2e, but the latter is not yet implemented in Eigen. + dy_dx = array_ops.where_v2( + math_ops.equal(x, 0.), math_ops.cast(0.5, x.dtype), + special_math_ops.bessel_i0e(x) - y * + (math_ops.sign(x) + math_ops.reciprocal(x))) + return grad * dy_dx + + +@ops.RegisterGradient("BesselK0") +def _BesselK0Grad(op: ops.Operation, grad): + """Compute gradient of bessel_k0(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + partial_x = -special_math_ops.bessel_k1(x) + return grad * partial_x + + +@ops.RegisterGradient("BesselK0e") +def _BesselK0eGrad(op: ops.Operation, grad): + """Compute gradient of bessel_k0e(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + partial_x = (y - special_math_ops.bessel_k1e(x)) + return grad * partial_x + + +@ops.RegisterGradient("BesselK1") +def _BesselK1Grad(op: ops.Operation, grad): + """Compute gradient of bessel_k1(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # At 0., this is NaN which is fine since the derivative is undefined + # at 0. 
+ partial_x = -special_math_ops.bessel_k0(x) - math_ops.div(y, x) + return grad * partial_x + + +@ops.RegisterGradient("BesselK1e") +def _BesselK1eGrad(op: ops.Operation, grad): + """Compute gradient of bessel_k1e(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # At 0., this is NaN which is fine since the derivative is undefined + # at 0. + partial_x = ( + y * (1. - math_ops.reciprocal(x)) - special_math_ops.bessel_k0e(x)) + return grad * partial_x + + +@ops.RegisterGradient("BesselJ0") +def _BesselJ0Grad(op: ops.Operation, grad): + """Compute gradient of bessel_j0(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + partial_x = -special_math_ops.bessel_j1(x) + return grad * partial_x + + +@ops.RegisterGradient("BesselJ1") +def _BesselJ1Grad(op: ops.Operation, grad): + """Compute gradient of bessel_j1(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # For x = 0, the correct gradient is 0.5. + # However, the main branch gives NaN because of the division by x, so + # we impute the gradient manually. + # An alternative solution is to express the gradient via bessel_i0e and + # bessel_i2e, but the latter is not yet implemented in Eigen. 
+ dy_dx = array_ops.where_v2( + math_ops.equal(x, 0.), math_ops.cast(0.5, x.dtype), + special_math_ops.bessel_j0(x) - math_ops.div(y, x)) + return grad * dy_dx + + +@ops.RegisterGradient("BesselY0") +def _BesselY0Grad(op: ops.Operation, grad): + """Compute gradient of bessel_y0(x) with respect to its argument.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + partial_x = -special_math_ops.bessel_y1(x) + return grad * partial_x + + +@ops.RegisterGradient("BesselY1") +def _BesselY1Grad(op: ops.Operation, grad): + """Compute gradient of bessel_y1(x) with respect to its argument.""" + x = op.inputs[0] + y = op.outputs[0] + with ops.control_dependencies([grad]): + # At 0., this is NaN which is fine since the derivative is undefined + # at 0. + partial_x = special_math_ops.bessel_y0(x) - math_ops.div(y, x) + return grad * partial_x + + +@ops.RegisterGradient("Igamma") +def _IgammaGrad(op: ops.Operation, grad): + """Returns gradient of igamma(a, x) with respect to a and x.""" + a = op.inputs[0] + x = op.inputs[1] + sa = array_ops.shape(a) + sx = array_ops.shape(x) + ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx) + + with ops.control_dependencies([grad]): + partial_a = gen_math_ops.igamma_grad_a(a, x) + # Perform operations in log space before summing, because Gamma(a) + # and Gamma'(a) can grow large. + partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - + math_ops.lgamma(a)) + return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa), + array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) + + +@ops.RegisterGradient("Igammac") +def _IgammacGrad(op: ops.Operation, grad): + """Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. 
a and x.""" + igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad) + return (-igamma_grad_a, -igamma_grad_x) + + +@ops.RegisterGradient("Betainc") +def _BetaincGrad(op: ops.Operation, grad): + """Returns gradient of betainc(a, b, x) with respect to x.""" + # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b + a, b, x = op.inputs + + # two cases: x is a scalar and a/b are same-shaped tensors, or vice + # versa; so its sufficient to check against shape(a). + sa = array_ops.shape(a) + sx = array_ops.shape(x) + _, rx = gen_array_ops.broadcast_gradient_args(sa, sx) + + # Perform operations in log space before summing, because terms + # can grow large. + log_beta = ( + gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) - + gen_math_ops.lgamma(a + b)) + # We use xlog1py and xlogy since the derivatives should tend to + # zero one of the tails when a is 1. or b is 1. + partial_x = math_ops.exp(math_ops.xlog1py(b - 1, -x) + + math_ops.xlogy(a - 1, x) - log_beta) + + return ( + None, # da + None, # db + array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) + + +@ops.RegisterGradient("Zeta") +def _ZetaGrad(op: ops.Operation, grad): + """Returns gradient of zeta(x, q) with respect to x and q.""" + # TODO(tillahoffmann): Add derivative with respect to x + x = op.inputs[0] + q = op.inputs[1] + # Broadcast gradients + sx = array_ops.shape(x) + sq = array_ops.shape(q) + unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq) + # Evaluate gradient + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + q = math_ops.conj(q) + partial_q = -x * math_ops.zeta(x + 1, q) # pylint: disable=invalid-unary-operand-type + return (None, + array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) + + +@ops.RegisterGradient("Polygamma") +def _PolygammaGrad(op: ops.Operation, grad): + """Returns gradient of psi(n, x) with respect to n and x.""" + # TODO(tillahoffmann): Add derivative with respect to n + n = op.inputs[0] + x = op.inputs[1] + # Broadcast 
gradients + sn = array_ops.shape(n) + sx = array_ops.shape(x) + unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx) + # Evaluate gradient + with ops.control_dependencies([grad]): + n = math_ops.conj(n) + x = math_ops.conj(x) + partial_x = math_ops.polygamma(n + 1, x) + return (None, + array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) + + +@ops.RegisterGradient("Sigmoid") +def _SigmoidGrad(op: ops.Operation, grad): + """Returns grad * sigmoid(x) * (1 - sigmoid(x)).""" + y = op.outputs[0] # y = sigmoid(x) + with ops.control_dependencies([grad]): + y = math_ops.conj(y) + return gen_math_ops.sigmoid_grad(y, grad) + + +@ops.RegisterGradient("SigmoidGrad") +def _SigmoidGradGrad(op: ops.Operation, grad): + with ops.control_dependencies([grad]): + a = math_ops.conj(op.inputs[0]) + b = math_ops.conj(op.inputs[1]) + gb = grad * b + return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad) + + +@ops.RegisterGradient("Sign") +def _SignGrad(op: ops.Operation, _): + """Returns 0.""" + x = op.inputs[0] + return array_ops.zeros_like(x) + + +@ops.RegisterGradient("Sin") +def _SinGrad(op: ops.Operation, grad): + """Returns grad * cos(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return grad * math_ops.cos(x) + + +@ops.RegisterGradient("Cos") +def _CosGrad(op: ops.Operation, grad): + """Returns grad * -sin(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + return -grad * math_ops.sin(x) + + +@ops.RegisterGradient("Tan") +def _TanGrad(op: ops.Operation, grad): + """Returns grad * 1/sec^2(x).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + secx = math_ops.reciprocal(math_ops.cos(x)) + secx2 = math_ops.square(secx) + return secx2 * grad + + +@ops.RegisterGradient("Asin") +def _AsinGrad(op: ops.Operation, grad): + """Returns grad * 1/sqrt(1-x^2).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + 
x2 = math_ops.square(x) + one = constant_op.constant(1, dtype=grad.dtype) + den = math_ops.sqrt(math_ops.subtract(one, x2)) + inv = math_ops.reciprocal(den) + return grad * inv + + +@ops.RegisterGradient("Acos") +def _AcosGrad(op: ops.Operation, grad): + """Returns grad * -1/sqrt(1-x^2).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + x2 = math_ops.square(x) + one = constant_op.constant(1, dtype=grad.dtype) + den = math_ops.sqrt(math_ops.subtract(one, x2)) + inv = math_ops.reciprocal(den) + return -grad * inv + + +@ops.RegisterGradient("Atan") +def _AtanGrad(op: ops.Operation, grad): + """Returns grad * 1/ (1 + x^2).""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + x = math_ops.conj(x) + x2 = math_ops.square(x) + one = constant_op.constant(1, dtype=grad.dtype) + inv = math_ops.reciprocal(math_ops.add(one, x2)) + return grad * inv + + +@ops.RegisterGradient("Atan2") +def _Atan2Grad(op: ops.Operation, grad): + """Returns grad * x / (y^2 + x^2), grad * -y / (y^2 + x^2).""" + y = op.inputs[0] + x = op.inputs[1] + with ops.control_dependencies([grad]): + grad_inv = grad / (math_ops.square(y) + math_ops.square(x)) + gy = x * grad_inv + gx = -y * grad_inv + # pylint: disable=arguments-out-of-order + return _ReduceGradientArgs(y, x, gy, gx) + # pylint: enable=arguments-out-of-order + + +@ops.RegisterGradient("AddN") +def _AddNGrad(op: ops.Operation, grad): + """Copies the gradient to all inputs.""" + # Not broadcasting. 
+ return [grad] * len(op.inputs) + + +def _ShapesFullySpecifiedAndEqual(x, y, grad): + # pylint: disable=protected-access + x_shape = x._shape_tuple() + y_shape = y._shape_tuple() + grad_shape = grad._shape_tuple() + # pylint: enable=protected-access + return (x_shape == y_shape and x_shape == grad_shape and + x_shape is not None and None not in x_shape) + + +@ops.RegisterGradient("Add") +@ops.RegisterGradient("AddV2") +def _AddGrad(op: ops.Operation, grad): + """Gradient for Add.""" + y = op.inputs[1] + try: + skip_input_indices = op.skip_input_indices or () + if 1 in skip_input_indices and _IsScalar(y): + return grad, None + except AttributeError: + # No gradient skipping, so do the full gradient computation + skip_input_indices = () + + x = op.inputs[0] + if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual( + x, y, grad + ): + return grad, grad + + gx = None if 0 in skip_input_indices else grad + gy = None if 1 in skip_input_indices else grad + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Sub") +def _SubGrad(op: ops.Operation, grad): + """Gradient for Sub.""" + y = op.inputs[1] + try: + skip_input_indices = op.skip_input_indices or () + if 1 in skip_input_indices and _IsScalar(y): + return grad, None + except AttributeError: + # No gradient skipping, so do the full gradient computation + skip_input_indices = () + + x = op.inputs[0] + if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual( + x, y, grad + ): + return grad, -grad + + gx = None if 0 in skip_input_indices else grad + gy = None if 1 in skip_input_indices else -grad + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Mul") +def _MulGrad(op: ops.Operation, grad): + """The gradient of scalar multiplication.""" + y = op.inputs[1] + try: + skip_input_indices = op.skip_input_indices or () + if 1 in skip_input_indices and _IsScalar(y): + return gen_math_ops.mul(grad, math_ops.conj(y)), None + except AttributeError: + # No gradient 
skipping, so do the full gradient computation + skip_input_indices = () + + x = op.inputs[0] + if ( + isinstance(grad, tensor.Tensor) + and _ShapesFullySpecifiedAndEqual(x, y, grad) + and grad.dtype in (dtypes.int32, dtypes.float32) + ): + return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) + assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype) + + if 0 in skip_input_indices: + gx = None + else: + gx = gen_math_ops.mul(grad, math_ops.conj(y)) + + if 1 in skip_input_indices: + gy = None + else: + gy = gen_math_ops.mul(math_ops.conj(x), grad) + + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("MulNoNan") +def _MulNoNanGrad(op: ops.Operation, grad): + """The gradient of scalar multiplication with NaN-suppression.""" + x = op.inputs[0] + y = op.inputs[1] + if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual( + x, y, grad + ): + return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad) + + assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. 
", y.dtype) + gx = gen_math_ops.mul_no_nan(grad, y) + gy = gen_math_ops.mul_no_nan(x, grad) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Div") +def _DivGrad(op: ops.Operation, grad): + """The gradient for the Div operator.""" + x = op.inputs[0] + y = op.inputs[1] + cx = math_ops.conj(x) + cy = math_ops.conj(y) + gx = math_ops.divide(grad, cy) + gy = grad * math_ops.divide(math_ops.divide(-cx, cy), cy) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("FloorDiv") +def _FloorDivGrad(_, unused_grad): + """The gradient for the FloorDiv operator.""" + return None, None + + +@ops.RegisterGradient("FloorMod") +def _FloorModGrad(op: ops.Operation, grad): + """Returns grad * (1, -floor(x/y)).""" + x = math_ops.conj(op.inputs[0]) + y = math_ops.conj(op.inputs[1]) + floor_xy = math_ops.floor_div(x, y) + gx = grad + gy = grad * math_ops.negative(floor_xy) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("TruncateDiv") +def _TruncateDivGrad(_, unused_grad): + return None, None + + +@ops.RegisterGradient("RealDiv") +def _RealDivGrad(op: ops.Operation, grad): + """RealDiv op gradient.""" + x = op.inputs[0] + y = op.inputs[1] + cx = math_ops.conj(op.inputs[0]) + cy = math_ops.conj(op.inputs[1]) + gx = math_ops.realdiv(grad, cy) + gy = grad * math_ops.realdiv(math_ops.realdiv(-cx, cy), cy) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("DivNoNan") +def _DivNoNanGrad(op: ops.Operation, grad): + """DivNoNan op gradient.""" + x = math_ops.conj(op.inputs[0]) + y = math_ops.conj(op.inputs[1]) + gx = math_ops.div_no_nan(grad, y) + gy = grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Pow") +def _PowGrad(op: ops.Operation, grad): + """Returns grad * (y*x^(y-1), z*log(x)).""" + x = op.inputs[0] + y = op.inputs[1] + cx = math_ops.conj(x) + cy = math_ops.conj(y) + try: + skip_input_indices = op.skip_input_indices or () + if 
1 in skip_input_indices and _IsScalar(y): + return grad * cy * math_ops.pow(cx, cy - 1), None + except AttributeError: + # No gradient skipping, so do the full gradient computation + skip_input_indices = () + + if 0 in skip_input_indices: + gx = None + else: + gx = grad * cy * math_ops.pow(cx, cy - 1) + + if 1 in skip_input_indices: + gy = None + else: + # Avoid false singularity at x = 0 + if x.dtype.is_complex: + # real(x) < 0 is fine for the complex case + mask = math_ops.not_equal(cx, 0) + else: + # There's no sensible real value to return if x < 0, so return 0 + mask = cx > 0 + safe_x = array_ops.where(mask, cx, array_ops.ones_like(x)) + log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x)) + gy = grad * math_ops.conj(op.outputs[0]) * log_x + + return _ReduceGradientArgs(x, y, gx, gy) + + +def _MaximumMinimumGradInputOnly(op: ops.Operation, grad, selector_op): + x = op.inputs[0] + y = op.inputs[1] + zeros = array_ops.zeros_like(grad) + xmask = selector_op(x, y) + xgrad = array_ops.where_v2(xmask, grad, zeros) + ygrad = None # Return None for ygrad since the config allows that. 
+ return (xgrad, ygrad) + + +def _MaximumMinimumGrad(op: ops.Operation, grad, selector_op): + """Factor out the code for the gradient of Maximum or Minimum.""" + y = op.inputs[1] + try: + skip_input_indices = op.skip_input_indices or () + if 1 in skip_input_indices and _IsScalar(y): + # When we want to get gradients for the first input only, and the second + # input tensor is a scalar, we can do a much simpler calculation + return _MaximumMinimumGradInputOnly(op, grad, selector_op) + except AttributeError: + # No gradient skipping, so do the full gradient computation + skip_input_indices = () + x = op.inputs[0] + zeros = array_ops.zeros_like(grad) + xmask = selector_op(x, y) + if 0 in skip_input_indices: + gx = None + else: + gx = array_ops.where_v2(xmask, grad, zeros) + if 1 in skip_input_indices: + gy = None + else: + gy = array_ops.where_v2(xmask, zeros, grad) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Maximum") +def _MaximumGrad(op: ops.Operation, grad): + """Returns grad*(x >= y, x < y) with type of grad.""" + return _MaximumMinimumGrad(op, grad, math_ops.greater_equal) + + +@ops.RegisterGradient("Minimum") +def _MinimumGrad(op: ops.Operation, grad): + """Returns grad*(x <= y, x > y) with type of grad.""" + return _MaximumMinimumGrad(op, grad, math_ops.less_equal) + + +@ops.RegisterGradient("SquaredDifference") +def _SquaredDifferenceGrad(op: ops.Operation, grad): + """Returns the gradient for (x-y)^2.""" + x = op.inputs[0] + y = op.inputs[1] + try: + skip_input_indices = op.skip_input_indices or () + except AttributeError: + # No gradient skipping, so do the full gradient computation + skip_input_indices = () + + with ops.control_dependencies([grad]): + # The parens ensure that if grad is IndexedSlices, it'll get multiplied by + # Tensor (not a number like 2.0) which causes it to convert to Tensor. 
+ x_grad = math_ops.scalar_mul(2.0, grad) * (x - y) + + if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual( + x, y, grad + ): + return x_grad, -x_grad + + gx = None if 0 in skip_input_indices else x_grad + gy = None if 1 in skip_input_indices else -x_grad + return _ReduceGradientArgs(x, y, gx, gy) + + +# Logical operations have no gradients. +ops.NotDifferentiable("Less") +ops.NotDifferentiable("LessEqual") +ops.NotDifferentiable("Greater") +ops.NotDifferentiable("GreaterEqual") +ops.NotDifferentiable("Equal") +ops.NotDifferentiable("ApproximateEqual") +ops.NotDifferentiable("NotEqual") +ops.NotDifferentiable("LogicalAnd") +ops.NotDifferentiable("LogicalOr") +ops.NotDifferentiable("LogicalNot") + + +@ops.RegisterGradient("Select") +def _SelectGrad(op: ops.Operation, grad): + c = op.inputs[0] + x = op.inputs[1] + zeros = array_ops.zeros_like(x) + return ( + None, + array_ops.where(c, grad, zeros), + array_ops.where(c, zeros, grad), + ) + + +@ops.RegisterGradient("SelectV2") +def _SelectGradV2(op: ops.Operation, grad): + c = op.inputs[0] + x = op.inputs[1] + y = op.inputs[2] + z = op.outputs[0] + zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype) + gx = array_ops.where_v2(c, grad, zeros) + gy = array_ops.where_v2(c, zeros, grad) + gx, _ = _ReduceGradientArgs(x, z, gx, None) + gy, _ = _ReduceGradientArgs(y, z, gy, None) + return None, gx, gy + + +def _MatMulGradAgainstFirstOnly(op: ops.Operation, grad): + """Gradient for MatMul, only for the first input.""" + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + b = math_ops.conj(op.inputs[1]) + if not t_a and not t_b: + grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True, grad_a=True) + elif not t_a and t_b: + grad_a = gen_math_ops.mat_mul(grad, b, grad_a=True) + elif t_a and not t_b: + grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True, grad_a=True) + elif t_a and t_b: + grad_a = gen_math_ops.mat_mul( + b, grad, transpose_a=True, transpose_b=True, grad_a=True + ) + 
return grad_a, None + + +def _MatMulGradAgainstSecondOnly(op: ops.Operation, grad): + """Gradient for MatMul, only for the second input.""" + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + a = math_ops.conj(op.inputs[0]) + if not t_a and not t_b: + grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True, grad_b=True) + elif not t_a and t_b: + grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, grad_b=True) + elif t_a and not t_b: + grad_b = gen_math_ops.mat_mul(a, grad, grad_b=True) + elif t_a and t_b: + grad_b = gen_math_ops.mat_mul( + grad, a, transpose_a=True, transpose_b=True, grad_b=True + ) + return None, grad_b + + +@ops.RegisterGradient("MatMul") +def _MatMulGrad(op: ops.Operation, grad): + """Gradient for MatMul.""" + try: + skip_input_indices = op.skip_input_indices + if skip_input_indices is not None: + if 1 in skip_input_indices: + return _MatMulGradAgainstFirstOnly(op, grad) + elif 0 in skip_input_indices: + return _MatMulGradAgainstSecondOnly(op, grad) + except AttributeError: + # No gradient skipping, so do the full gradient computation + pass + + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + a = math_ops.conj(op.inputs[0]) + b = math_ops.conj(op.inputs[1]) + if not t_a and not t_b: + grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True, grad_a=True) + grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True, grad_b=True) + elif not t_a and t_b: + grad_a = gen_math_ops.mat_mul(grad, b, grad_a=True) + grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, grad_b=True) + elif t_a and not t_b: + grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True, grad_a=True) + grad_b = gen_math_ops.mat_mul(a, grad, grad_b=True) + elif t_a and t_b: + grad_a = gen_math_ops.mat_mul( + b, grad, transpose_a=True, transpose_b=True, grad_a=True + ) + grad_b = gen_math_ops.mat_mul( + grad, a, transpose_a=True, transpose_b=True, grad_b=True + ) + return grad_a, grad_b + + +@ops.RegisterGradient("SparseMatMul") +def 
_SparseMatMulGrad(op: ops.Operation, grad): + """Gradient for SparseMatMul.""" + + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + is_sparse = {} + is_sparse[op.inputs[0].ref()] = op.get_attr("a_is_sparse") + is_sparse[op.inputs[1].ref()] = op.get_attr("b_is_sparse") + # Use heuristic to figure out if grad might be sparse + is_sparse[grad.ref()] = not context.executing_eagerly() and ( + grad.op.type == "ReluGrad") + + def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False): + """Helper function to create SparseMatMul op.""" + + assert t1.ref() in is_sparse and t2.ref() in is_sparse + t1_sparse = is_sparse[t1.ref()] + t2_sparse = is_sparse[t2.ref()] + if transpose_b: + t2 = array_ops.transpose(t2) + transpose_b = False + prod = math_ops.matmul( + t1, + t2, + transpose_a=transpose_a, + transpose_b=transpose_b, + a_is_sparse=t1_sparse, + b_is_sparse=t2_sparse) + if prod.dtype != out_dtype: + prod = math_ops.cast(prod, out_dtype) + return prod + + dtype_a = op.inputs[0].dtype + dtype_b = op.inputs[1].dtype + if not t_a and not t_b: + return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True), + _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True)) + elif not t_a and t_b: + return (_SparseMatMul(grad, op.inputs[1], dtype_a), + _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True)) + elif t_a and not t_b: + return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True), + _SparseMatMul(op.inputs[0], grad, dtype_b)) + elif t_a and t_b: + return (_SparseMatMul( + op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True), + _SparseMatMul( + grad, op.inputs[0], dtype_b, transpose_a=True, + transpose_b=True)) + + +@ops.RegisterGradient("Floor") +def _FloorGrad(_, unused_grad): + return [None] + + +@ops.RegisterGradient("Ceil") +def _CeilGrad(_, unused_grad): + return [None] + + +@ops.RegisterGradient("Round") +def _RoundGrad(_, unused_grad): + return [None] + + +@ops.RegisterGradient("Rint") +def 
_RintGrad(_, unused_grad): + # the gradient of Rint is zero + return [None] + + +@ops.RegisterGradient("BatchMatMul") +def _BatchMatMul(op: ops.Operation, grad): + """Returns the gradient of x and y given the gradient of x * y.""" + x = op.inputs[0] + y = op.inputs[1] + adj_x = op.get_attr("adj_x") + adj_y = op.get_attr("adj_y") + + if not adj_x: + if not adj_y: + grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True) + grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False) + else: + grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False) + grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False) + else: + if not adj_y: + grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True) + grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False) + else: + grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True) + grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True) + + return grad_x, grad_y + + +@ops.RegisterGradient("BatchMatMulV2") +@ops.RegisterGradient("BatchMatMulV3") +def _BatchMatMulV2(op: ops.Operation, grad): + """Returns the gradient of x and y given the gradient of x * y.""" + x = op.inputs[0] + y = op.inputs[1] + adj_x = op.get_attr("adj_x") + adj_y = op.get_attr("adj_y") + + if not adj_x: + if not adj_y: + grad_x = math_ops.matmul( + grad, y, adjoint_a=False, adjoint_b=True, grad_a=True + ) + grad_y = math_ops.matmul( + x, grad, adjoint_a=True, adjoint_b=False, grad_b=True + ) + else: + grad_x = math_ops.matmul( + grad, y, adjoint_a=False, adjoint_b=False, grad_a=True + ) + grad_y = math_ops.matmul( + grad, x, adjoint_a=True, adjoint_b=False, grad_b=True + ) + else: + if not adj_y: + grad_x = math_ops.matmul( + y, grad, adjoint_a=False, adjoint_b=True, grad_a=True + ) + grad_y = math_ops.matmul( + x, grad, adjoint_a=False, adjoint_b=False, grad_b=True + ) + else: + grad_x = math_ops.matmul( + y, grad, adjoint_a=True, adjoint_b=True, grad_a=True + ) + grad_y = 
math_ops.matmul( + grad, x, adjoint_a=True, adjoint_b=True, grad_b=True + ) + + # Possibly reduce along the broadcasted batch dimensions, if broadcasting + # is required. + shape_x_static = x.get_shape() + shape_y_static = y.get_shape() + output_may_have_non_empty_batch_shape = ( + (shape_x_static.rank is None or shape_x_static.rank > 2) or + (shape_y_static.rank is None or shape_y_static.rank > 2)) + batch_shapes_match = ( + shape_x_static[:-2].is_fully_defined() and + shape_y_static[:-2].is_fully_defined() and + shape_x_static[:-2] == shape_y_static[:-2]) + if (not output_may_have_non_empty_batch_shape) or batch_shapes_match: + return grad_x, grad_y + + sx = array_ops.shape(x) + sy = array_ops.shape(y) + rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2]) + grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx) + grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy) + return grad_x, grad_y + + +ops.NotDifferentiable("Range") +ops.NotDifferentiable("LinSpace") + + +@ops.RegisterGradient("Complex") +def _ComplexGrad(op: ops.Operation, grad): + """Returns the real and imaginary components of 'grad', respectively.""" + x = op.inputs[0] + y = op.inputs[1] + gx = math_ops.real(grad) + gy = math_ops.imag(grad) + return _ReduceGradientArgs(x, y, gx, gy) + + +@ops.RegisterGradient("Real") +def _RealGrad(_, grad): + """Returns 'grad' as the real part and set the imaginary part 0.""" + zero = constant_op.constant(0, dtype=grad.dtype) + return math_ops.complex(grad, zero) + + +@ops.RegisterGradient("Imag") +def _ImagGrad(_, grad): + """Returns 'grad' as the imaginary part and set the real part 0.""" + zero = constant_op.constant(0, dtype=grad.dtype) + return math_ops.complex(zero, grad) + + +@ops.RegisterGradient("Angle") +def _AngleGrad(op: ops.Operation, grad): + """Returns `-grad / (Im(x) + i Re(x))`.""" + x = op.inputs[0] + with ops.control_dependencies([grad]): + re = math_ops.real(x) + im = math_ops.imag(x) + z = 
math_ops.reciprocal(math_ops.complex(im, re)) + zero = constant_op.constant(0, dtype=grad.dtype) + complex_grad = math_ops.complex(grad, zero) + return -complex_grad * z + + +@ops.RegisterGradient("Conj") +def _ConjGrad(_, grad): + """Returns the complex conjugate of grad.""" + return math_ops.conj(grad) + + +@ops.RegisterGradient("ComplexAbs") +def _ComplexAbsGrad(op: ops.Operation, grad): + """Returns the gradient of ComplexAbs.""" + return math_ops.div_no_nan( + math_ops.complex( + grad, array_ops.zeros_like(grad)) * op.inputs[0], + math_ops.complex( + op.outputs[0], array_ops.zeros_like(op.outputs[0]))) + + +@ops.RegisterGradient("Cast") +def _CastGrad(op: ops.Operation, grad): + t = [ + dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16, + dtypes.complex64, dtypes.complex128 + ] + src_type = op.inputs[0].dtype.base_dtype + dst_type = grad.dtype.base_dtype + if src_type in t and dst_type in t: + return math_ops.cast(grad, src_type) + else: + return None + + +@ops.RegisterGradient("Cross") +def _CrossGrad(op: ops.Operation, grad): + u = op.inputs[0] + v = op.inputs[1] + return (math_ops.cross(v, grad), math_ops.cross(grad, u)) + + +@ops.RegisterGradient("Cumsum") +def _CumsumGrad(op: ops.Operation, grad): + axis = op.inputs[1] + exclusive = op.get_attr("exclusive") + reverse = op.get_attr("reverse") + return [ + math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse), + None + ] + + +@ops.RegisterGradient("Cumprod") +def _CumprodGrad(op: ops.Operation, grad): + x = op.inputs[0] + axis = op.inputs[1] + exclusive = op.get_attr("exclusive") + reverse = op.get_attr("reverse") + + prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse) + out = math_ops.cumsum( + prod * grad, axis, exclusive=exclusive, reverse=not reverse + ) + return [math_ops.div_no_nan(out, x), None] + + +# pylint: disable=missing-function-docstring +@ops.RegisterGradient("CumulativeLogsumexp") +def _CumulativeLogsumexpGrad(op: ops.Operation, grad): + x = 
op.inputs[0] + axis = op.inputs[1] + cumulative_logsumexp = op.outputs[0] + + exclusive = op.get_attr("exclusive") + reverse = op.get_attr("reverse") + + # Split the incoming gradient into positive and negative part + # in order to take logs. This is required for stable results. + log_grad_positive = array_ops.where_v2( + math_ops.greater(grad, 0), + math_ops.log(grad), + grad.dtype.min) + + log_grad_negative = array_ops.where_v2( + math_ops.less(grad, 0), + math_ops.log(-grad), + grad.dtype.min) + + output_pos = math_ops.exp( + math_ops.cumulative_logsumexp( + log_grad_positive - cumulative_logsumexp, + axis=axis, reverse=not reverse, exclusive=exclusive) + x) + + output_neg = math_ops.exp( + math_ops.cumulative_logsumexp( + log_grad_negative - cumulative_logsumexp, + axis=axis, reverse=not reverse, exclusive=exclusive) + x) + + return [output_pos - output_neg, None] + + +@ops.RegisterGradient("NextAfter") +def _NextAfterGrad(op: ops.Operation, grad): + """Returns gradient of nextafter(x1, x2) with respect to x1 and x2.""" + x1 = op.inputs[0] + x2 = op.inputs[1] + s_x1 = array_ops.shape(x1) + s_x2 = array_ops.shape(x2) + r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2) + with ops.control_dependencies([grad]): + partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype) + partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype) + return (array_ops.reshape( + math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1), + array_ops.reshape( + math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_impl_distribute.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_impl_distribute.py new file mode 100644 index 0000000000000000000000000000000000000000..b26115e2222f50b4e6c484451afe071e654a349a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/nn_impl_distribute.py @@ -0,0 +1,142 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Implementation of Neural Net (NN) functions with distribution strategy.""" + +from tensorflow.python.distribute import distribute_lib +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.losses import util as losses_util +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("nn.scale_regularization_loss") +@dispatch.add_dispatch_support +def scale_regularization_loss(regularization_loss): + """Scales the sum of the given regularization losses by number of replicas. + + Usage with distribution strategy and custom training loop: + + ```python + with strategy.scope(): + def compute_loss(self, label, predictions): + per_example_loss = tf.keras.losses.sparse_categorical_crossentropy( + labels, predictions) + + # Compute loss that is scaled by sample_weight and by global batch size. + loss = tf.nn.compute_average_loss( + per_example_loss, + sample_weight=sample_weight, + global_batch_size=GLOBAL_BATCH_SIZE) + + # Add scaled regularization losses. + loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights)) + return loss + ``` + + Args: + regularization_loss: Regularization loss. + + Returns: + Scalar loss value. 
+ """ # pylint: disable=g-doc-exception + if ( + distribute_lib.has_strategy() + and distribute_lib.in_cross_replica_context() + ): + raise RuntimeError( + "You are calling `scale_regularization_loss` in cross replica context, " + "while it was expected to be called in replica context." + ) + + num_replicas = distribute_lib.get_strategy().num_replicas_in_sync + return math_ops.reduce_sum(regularization_loss) / num_replicas + + +@tf_export("nn.compute_average_loss") +@dispatch.add_dispatch_support +def compute_average_loss( + per_example_loss, sample_weight=None, global_batch_size=None +): + """Scales per-example losses with sample_weights and computes their average. + + Usage with distribution strategy and custom training loop: + + ```python + with strategy.scope(): + def compute_loss(labels, predictions, sample_weight=None): + + # If you are using a `Loss` class instead, set reduction to `NONE` so that + # we can do the reduction afterwards and divide by global batch size. + per_example_loss = tf.keras.losses.sparse_categorical_crossentropy( + labels, predictions) + + # Compute loss that is scaled by sample_weight and by global batch size. + return tf.nn.compute_average_loss( + per_example_loss, + sample_weight=sample_weight, + global_batch_size=GLOBAL_BATCH_SIZE) + ``` + + Args: + per_example_loss: Per-example loss. + sample_weight: Optional weighting for each example. + global_batch_size: Optional global batch size value. Defaults to (size of + first dimension of `losses`) * (number of replicas). + + Returns: + Scalar loss value, obtained by summing the `per_example_loss` and dividing + by `global_batch_size`. If `global_batch_size` is zero, the result is zero. 
+ """ # pylint: disable=g-doc-exception + per_example_loss = ops.convert_to_tensor(per_example_loss) + input_dtype = per_example_loss.dtype + + with losses_util.check_per_example_loss_rank(per_example_loss): + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight) + per_example_loss = losses_util.scale_losses_by_sample_weight( + per_example_loss, sample_weight + ) + per_example_loss = math_ops.cast(per_example_loss, input_dtype) + + if global_batch_size is None: + if ( + distribute_lib.has_strategy() + and distribute_lib.in_cross_replica_context() + ): + raise RuntimeError( + "You are calling `compute_average_loss` in cross replica context, " + "while it was expected to be called in replica context." + ) + + num_replicas = distribute_lib.get_strategy().num_replicas_in_sync + per_replica_batch_size = array_ops.shape_v2(per_example_loss)[0] + global_batch_size = per_replica_batch_size * num_replicas + + check_ops.assert_scalar_v2( + global_batch_size, message="global_batch_size must be scalar." + ) + check_ops.assert_integer_v2( + global_batch_size, message="global_batch_size must be an integer." + ) + check_ops.assert_non_negative_v2( + global_batch_size, message="global_batch_size must be non-negative." + ) + + loss = math_ops.reduce_sum(per_example_loss) + global_batch_size = math_ops.cast(global_batch_size, input_dtype) + return math_ops.div_no_nan(loss, global_batch_size) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/state_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/state_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b80487cd78c16992473aba8d374160096915394e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/state_ops.py @@ -0,0 +1,1043 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Variables. + +See the [Variables](https://www.tensorflow.org/guide/variables) guide. +""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_resource_variable_ops +from tensorflow.python.ops import gen_state_ops +from tensorflow.python.ops import state_grad # pylint: disable=unused-import +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.gen_state_ops import * +# pylint: enable=wildcard-import +from tensorflow.python.util import deprecation +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.tf_export import tf_export + + +# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args +def variable_op(shape, dtype, name="Variable", set_shape=True, container="", + shared_name=""): + """Deprecated. Used variable_op_v2 instead.""" + if not set_shape: + shape = tensor_shape.unknown_shape() + ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name, + container=container, shared_name=shared_name) + # TODO(mrry): Move this to where it is used, so we can get rid of this op + # wrapper? 
+ if set_shape: + ret.set_shape(shape) + return ret + + +def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""): + """Create a variable Operation. + + See also variables.Variable. + + Args: + shape: The shape of the tensor managed by this variable + dtype: The underlying type of the tensor values. + name: optional name to use for the variable op. + container: An optional string. Defaults to "". + If non-empty, this variable is placed in the given container. + Otherwise, a default container is used. + shared_name: An optional string. Defaults to "". + If non-empty, this variable is named in the given bucket + with this shared_name. Otherwise, the node name is used instead. + + Returns: + A variable tensor. + """ + return gen_state_ops.variable_v2( + shape=shape, + dtype=dtype, + name=name, + container=container, + shared_name=shared_name) + + +def init_variable(v, init, name="init"): + """Initializes variable with "init". + + This op does the following: + if init is a Tensor, v = init + if callable(init): v = init(VariableShape(v), v.dtype) + + Args: + v: Variable to initialize + init: Tensor to assign to v, + Or an object convertible to Tensor e.g. nparray, + Or an Initializer that generates a tensor given the shape and type of v. + An "Initializer" is a callable that returns a tensor that "v" should be + set to. It will be called as init(shape, dtype). + name: Optional name for the op. + + Returns: + The operation that initializes v. + """ + with ops.name_scope(None, v.op.name + "/", [v, init]): + with ops.name_scope(name) as scope: + with ops.colocate_with(v): + if callable(init): + assert v.get_shape().is_fully_defined(), "Variable shape unknown." + # TODO(mrry): Convert to v.shape when the property and + # accessor are reconciled (and all initializers support + # tf.TensorShape objects). 
+ value = init(v.get_shape().as_list(), v.dtype.base_dtype) + value = ops.convert_to_tensor(value, name="value") + return gen_state_ops.assign(v, value, name=scope) + else: + init = ops.convert_to_tensor(init, name="init") + return gen_state_ops.assign(v, init, name=scope) + + +def is_variable_initialized(ref, name=None): + """Checks whether a tensor has been initialized. + + Outputs boolean scalar indicating whether the tensor has been initialized. + + Args: + ref: A mutable `Tensor`. + Should be from a `Variable` node. May be uninitialized. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.is_variable_initialized(ref=ref, name=name) + # Handle resource variables. + return ref.is_initialized(name=name) + + +@tf_export(v1=["assign_sub"]) +def assign_sub(ref, value, use_locking=None, name=None): + """Update `ref` by subtracting `value` from it. + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + Unlike `tf.math.subtract`, this op does not broadcast. `ref` and `value` + must have the same shape. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, + `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be + from a `Variable` node. + value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to + be subtracted to the variable. + use_locking: An optional `bool`. Defaults to `False`. If True, the + subtraction will be protected by a lock; otherwise the behavior is + undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + Same as `ref`. Returned as a convenience for operations that want + to use the new value after the variable has been updated. 
+ + @compatibility(TF2) + `tf.compat.v1.assign_sub` is mostly compatible with eager + execution and `tf.function`. + + To switch to the native TF2 style, one could use method 'assign_sub' of + `tf.Variable`: + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :-------------- | :------------------------- | + | `ref` | `self` | In `assign_sub()` method | + | `value` | `value` | In `assign_sub()` method | + | `use_locking` | `use_locking` | In `assign_sub()` method | + | `name` | `name` | In `assign_sub()` method | + | - | `read_value` | Set to True to replicate | + : : : behavior (True is default) : + + + #### Before & After Usage Example + + Before: + + >>> with tf.Graph().as_default(): + ... with tf.compat.v1.Session() as sess: + ... a = tf.compat.v1.Variable(1, dtype=tf.int64) + ... sess.run(a.initializer) + ... update_op = tf.compat.v1.assign_sub(a, 1) + ... res_a = sess.run(update_op) + ... res_a + 0 + + After: + + >>> b = tf.Variable(1, dtype=tf.int64) + >>> res_b = b.assign_sub(1) + >>> res_b.numpy() + 0 + + @end_compatibility + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.assign_sub( + ref, value, use_locking=use_locking, name=name) + return ref.assign_sub(value) + + +@tf_export(v1=["assign_add"]) +def assign_add(ref, value, use_locking=None, name=None): + """Update `ref` by adding `value` to it. + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + Unlike `tf.math.add`, this op does not broadcast. `ref` and `value` must have + the same shape. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, + `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be + from a `Variable` node. + value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to + be added to the variable. 
+ use_locking: An optional `bool`. Defaults to `False`. If True, the addition + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + Same as `ref`. Returned as a convenience for operations that want + to use the new value after the variable has been updated. + + @compatibility(TF2) + `tf.compat.v1.assign_add` is mostly compatible with eager + execution and `tf.function`. + + To switch to the native TF2 style, one could use method 'assign_add' of + `tf.Variable`: + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :-------------- | :------------------------- | + | `ref` | `self` | In `assign_add()` method | + | `value` | `value` | In `assign_add()` method | + | `use_locking` | `use_locking` | In `assign_add()` method | + | `name` | `name` | In `assign_add()` method | + | - | `read_value` | Set to True to replicate | + : : : behavior (True is default) : + + + #### Before & After Usage Example + + Before: + + >>> with tf.Graph().as_default(): + ... with tf.compat.v1.Session() as sess: + ... a = tf.compat.v1.Variable(0, dtype=tf.int64) + ... sess.run(a.initializer) + ... update_op = tf.compat.v1.assign_add(a, 1) + ... res_a = sess.run(update_op) + ... res_a + 1 + + After: + + >>> b = tf.Variable(0, dtype=tf.int64) + >>> res_b = b.assign_add(1) + >>> res_b.numpy() + 1 + + @end_compatibility + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.assign_add( + ref, value, use_locking=use_locking, name=name) + return ref.assign_add(value) + + +@tf_export(v1=["assign"]) +def assign(ref, value, validate_shape=None, use_locking=None, name=None): + """Update `ref` by assigning `value` to it. + + This operation outputs a Tensor that holds the new value of `ref` after + the value has been assigned. This makes it easier to chain operations that + need to use the reset value. + + Args: + ref: A mutable `Tensor`. 
Should be from a `Variable` node. May be + uninitialized. + value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to + be assigned to the variable. + validate_shape: An optional `bool`. Defaults to `True`. If true, the + operation will validate that the shape of 'value' matches the shape of the + Tensor being assigned to. If false, 'ref' will take on the shape of + 'value'. + use_locking: An optional `bool`. Defaults to `True`. If True, the assignment + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + A `Tensor` that will hold the new value of `ref` after + the assignment has completed. + + @compatibility(TF2) + `tf.compat.v1.assign` is mostly compatible with eager + execution and `tf.function`. However, argument 'validate_shape' will be + ignored. To avoid shape validation, set 'shape' to tf.TensorShape(None) when + constructing the variable: + + >>> import tensorflow as tf + >>> a = tf.Variable([1], shape=tf.TensorShape(None)) + >>> tf.compat.v1.assign(a, [2,3]) + + To switch to the native TF2 style, one could use method 'assign' of + `tf.Variable`: + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :-------------- | :------------------------- | + | `ref` | `self` | In `assign()` method | + | `value` | `value` | In `assign()` method | + | `validate_shape` | Not supported | Specify `shape` in the | + : : : constructor to replicate : + : : : behavior : + | `use_locking` | `use_locking` | In `assign()` method | + | `name` | `name` | In `assign()` method | + | - | `read_value` | Set to True to replicate | + : : : behavior (True is default) : + @end_compatibility + + + #### Before & After Usage Example + + Before: + + >>> with tf.Graph().as_default(): + ... with tf.compat.v1.Session() as sess: + ... a = tf.compat.v1.Variable(0, dtype=tf.int64) + ... sess.run(a.initializer) + ... 
update_op = tf.compat.v1.assign(a, 2) + ... res_a = sess.run(update_op) + ... res_a + 2 + + After: + + >>> b = tf.Variable(0, dtype=tf.int64) + >>> res_b = b.assign(2) + >>> res_b.numpy() + 2 + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.assign( + ref, value, use_locking=use_locking, name=name, + validate_shape=validate_shape) + return ref.assign(value, name=name) + + +@tf_export(v1=["count_up_to"]) +@deprecated(None, "Prefer Dataset.range instead.") +def count_up_to(ref, limit, name=None): + r"""Increments 'ref' until it reaches 'limit'. + + Args: + ref: A Variable. Must be one of the following types: `int32`, `int64`. + Should be from a scalar `Variable` node. + limit: An `int`. + If incrementing ref would bring it above limit, instead generates an + 'OutOfRange' error. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `ref`. + A copy of the input before increment. If nothing else modifies the + input, the values produced will all be distinct. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.count_up_to(ref, limit=limit, name=name) + return gen_state_ops.resource_count_up_to( + ref.handle, limit, T=ref.dtype, name=name) + + +@tf_export(v1=["scatter_update"]) +def scatter_update(ref, indices, updates, use_locking=True, name=None): + # pylint: disable=line-too-long + r"""Applies sparse updates to a variable reference. + + This operation computes + + ```python + # Scalar indices + ref[indices, ...] = updates[...] + + # Vector indices (for each i) + ref[indices[i], ...] = updates[i, ...] + + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + ``` + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + If values in `ref` is to be updated more than once, because there are + duplicate entries in `indices`, the order at which the updates happen + for each value is undefined. 
+ + Requires `updates.shape = indices.shape + ref.shape[1:]`. + +
+ +
+ + Args: + ref: A `Variable`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. + A tensor of updated values to store in `ref`. + use_locking: An optional `bool`. Defaults to `True`. + If True, the assignment will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + Same as `ref`. Returned as a convenience for operations that want + to use the updated values after the update is done. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_update(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_nd_update"]) +def scatter_nd_update(ref, indices, updates, use_locking=True, name=None): + r"""Applies sparse `updates` to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + ``` + + For example, say we want to update 4 scattered elements to a rank-1 tensor to + 8 elements. 
In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + update = tf.compat.v1.scatter_nd_update(ref, indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(update) + ``` + + The resulting update to ref would look like this: + + [1, 11, 3, 10, 9, 6, 7, 12] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + ref: A Variable. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into ref. + updates: A `Tensor`. Must have the same type as `ref`. + A Tensor. Must have the same type as ref. A tensor of updated + values to add to ref. + use_locking: An optional `bool`. Defaults to `True`. + An optional bool. Defaults to True. If True, the assignment will + be protected by a lock; otherwise the behavior is undefined, + but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + The value of the variable after the update. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_nd_update( + ref, indices, updates, use_locking, name) + return ref._lazy_read(gen_state_ops.resource_scatter_nd_update( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_add"]) +def scatter_add(ref, indices, updates, use_locking=False, name=None): + # pylint: disable=line-too-long + r"""Adds sparse updates to the variable referenced by `resource`. + + This operation computes + + ```python + # Scalar indices + ref[indices, ...] += updates[...] + + # Vector indices (for each i) + ref[indices[i], ...] += updates[i, ...] + + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + ``` + + This operation outputs `ref` after the update is done. 
+ This makes it easier to chain operations that need to use the updated value. + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their contributions add. + + Requires `updates.shape = indices.shape + ref.shape[1:]`. + +
+ +
+ + Args: + ref: A `Variable`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. + A tensor of updated values to store in `ref`. + use_locking: An optional `bool`. Defaults to `False`. + If True, the assignment will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + Same as `ref`. Returned as a convenience for operations that want + to use the updated values after the update is done. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_add(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_nd_add"]) +def scatter_nd_add(ref, indices, updates, use_locking=False, name=None): + r"""Applies sparse addition to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + ``` + + For example, say we want to add 4 scattered elements to a rank-1 tensor to + 8 elements. 
In Python, that addition would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1], [7]]) + updates = tf.constant([9, 10, 11, 12]) + add = tf.compat.v1.scatter_nd_add(ref, indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(add) + ``` + + The resulting update to ref would look like this: + + [1, 13, 3, 14, 14, 6, 7, 20] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, + `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, + `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into ref. + updates: A `Tensor`. Must have the same type as `ref`. + A tensor of updated values to add to ref. + use_locking: An optional `bool`. Defaults to `False`. + If True, the assignment will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_nd_add( + ref, indices, updates, use_locking, name) + return ref._lazy_read(gen_state_ops.resource_scatter_nd_add( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_sub"]) +def scatter_sub(ref, indices, updates, use_locking=False, name=None): + r"""Subtracts sparse updates to a variable reference. + + ```python + # Scalar indices + ref[indices, ...] -= updates[...] + + # Vector indices (for each i) + ref[indices[i], ...] -= updates[i, ...] + + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] 
+ ``` + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their (negated) contributions add. + + Requires `updates.shape = indices.shape + ref.shape[1:]` or + `updates.shape = []`. + +
+ +
+ + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, + `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, + `uint32`, `uint64`. Should be from a `Variable` node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. + A tensor of updated values to subtract from `ref`. + use_locking: An optional `bool`. Defaults to `False`. + If True, the subtraction will be protected by a lock; + otherwise the behavior is undefined, but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_sub(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_sub( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_nd_sub"]) +def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None): + r"""Applies sparse subtraction to individual values or slices in a Variable. + + `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + + `indices` must be integer tensor, containing indices into `ref`. + It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + + The innermost dimension of `indices` (with length `K`) corresponds to + indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + dimension of `ref`. + + `updates` is `Tensor` of rank `Q-1+P-K` with shape: + + ``` + [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + ``` + + For example, say we want to subtract 4 scattered elements from a rank-1 tensor + with 8 elements. 
In Python, that update would look like this: + + ```python + ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + indices = tf.constant([[4], [3], [1] ,[7]]) + updates = tf.constant([9, 10, 11, 12]) + op = tf.compat.v1.scatter_nd_sub(ref, indices, updates) + with tf.compat.v1.Session() as sess: + print sess.run(op) + ``` + + The resulting update to ref would look like this: + + [1, -9, 3, -6, -6, 6, 7, -4] + + See `tf.scatter_nd` for more details about how to make updates to + slices. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, + `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, + `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A tensor of indices into ref. + updates: A `Tensor`. Must have the same type as `ref`. + A tensor of updated values to add to ref. + use_locking: An optional `bool`. Defaults to `False`. + An optional bool. Defaults to True. If True, the assignment will + be protected by a lock; otherwise the behavior is undefined, + but may exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_nd_sub( + ref, indices, updates, use_locking, name) + return ref._lazy_read(gen_state_ops.resource_scatter_nd_sub( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_mul"]) +def scatter_mul(ref, indices, updates, use_locking=False, name=None): + # pylint: disable=line-too-long + r"""Multiplies sparse updates into a variable reference. + + This operation computes + + ```python + # Scalar indices + ref[indices, ...] *= updates[...] + + # Vector indices (for each i) + ref[indices[i], ...] *= updates[i, ...] 
+ + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + ``` + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their contributions multiply. + + Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = + []`. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, + `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, + `uint32`, `uint64`. Should be from a `Variable` node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A + tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated + values to multiply to `ref`. + use_locking: An optional `bool`. Defaults to `False`. If True, the operation + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_mul(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_mul( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_div"]) +def scatter_div(ref, indices, updates, use_locking=False, name=None): + # pylint: disable=line-too-long + r"""Divides a variable reference by sparse updates. + + This operation computes + + ```python + # Scalar indices + ref[indices, ...] /= updates[...] + + # Vector indices (for each i) + ref[indices[i], ...] /= updates[i, ...] 
+ + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + ``` + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their contributions divide. + + Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = + []`. + + Args: + ref: A mutable `Tensor`. Must be one of the following types: `float32`, + `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, + `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, + `uint32`, `uint64`. Should be from a `Variable` node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A + tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. A tensor of values + that `ref` is divided by. + use_locking: An optional `bool`. Defaults to `False`. If True, the operation + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_div(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_div( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_max"]) +def scatter_max(ref, indices, updates, use_locking=False, name=None): + # pylint: disable=line-too-long + r"""Reduces sparse updates into a variable reference using the `max` operation. + + This operation computes + + # Scalar indices + ref[indices, ...] = max(ref[indices, ...], updates[...]) + + # Vector indices (for each i) + ref[indices[i], ...] 
= max(ref[indices[i], ...], updates[i, ...]) + + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], + updates[i, ..., j, ...]) + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their contributions combine. + + Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = + []`. + +
+ +
+ + Args: + ref: A mutable `Tensor`. Must be one of the following types: `half`, + `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a + `Variable` node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A + tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated + values to reduce into `ref`. + use_locking: An optional `bool`. Defaults to `False`. If True, the update + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_max(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_max( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["scatter_min"]) +def scatter_min(ref, indices, updates, use_locking=False, name=None): + # pylint: disable=line-too-long + r"""Reduces sparse updates into a variable reference using the `min` operation. + + This operation computes + + # Scalar indices + ref[indices, ...] = min(ref[indices, ...], updates[...]) + + # Vector indices (for each i) + ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + + # High rank indices (for each i, ..., j) + ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], + updates[i, ..., j, ...]) + + This operation outputs `ref` after the update is done. + This makes it easier to chain operations that need to use the reset value. + + Duplicate entries are handled correctly: if multiple `indices` reference + the same location, their contributions combine. + + Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = + []`. + +
+ +
+ + Args: + ref: A mutable `Tensor`. Must be one of the following types: `half`, + `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a + `Variable` node. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A + tensor of indices into the first dimension of `ref`. + updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated + values to reduce into `ref`. + use_locking: An optional `bool`. Defaults to `False`. If True, the update + will be protected by a lock; otherwise the behavior is undefined, but may + exhibit less contention. + name: A name for the operation (optional). + + Returns: + A mutable `Tensor`. Has the same type as `ref`. + """ + if ref.dtype._is_ref_dtype: + return gen_state_ops.scatter_min(ref, indices, updates, + use_locking=use_locking, name=name) + return ref._lazy_read(gen_resource_variable_ops.resource_scatter_min( # pylint: disable=protected-access + ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), + name=name)) + + +@tf_export(v1=["batch_scatter_update"]) +@deprecation.deprecated( + "2018-11-29", "Use the batch_scatter_update method of Variable instead.") +def batch_scatter_update(ref, indices, updates, use_locking=True, name=None): + """Generalization of `tf.compat.v1.scatter_update` to axis different than 0. + + Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates` + have a series of leading dimensions that are the same for all of them, and the + updates are performed on the last dimension of indices. 
In other words, the + dimensions should be the following: + + `num_prefix_dims = indices.ndims - 1` + `batch_dim = num_prefix_dims + 1` + `updates.shape = indices.shape + var.shape[batch_dim:]` + + where + + `updates.shape[:num_prefix_dims]` + `== indices.shape[:num_prefix_dims]` + `== var.shape[:num_prefix_dims]` + + And the operation performed can be expressed as: + + `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]` + + When indices is a 1D tensor, this operation is equivalent to + `tf.compat.v1.scatter_update`. + + To avoid this operation there would be 2 alternatives: + 1) Reshaping the variable by merging the first `ndims` dimensions. However, + this is not possible because `tf.reshape` returns a Tensor, which we + cannot use `tf.compat.v1.scatter_update` on. + 2) Looping over the first `ndims` of the variable and using + `tf.compat.v1.scatter_update` on the subtensors that result of slicing the + first + dimension. This is a valid option for `ndims = 1`, but less efficient than + this implementation. + + See also `tf.compat.v1.scatter_update` and `tf.compat.v1.scatter_nd_update`. + + Args: + ref: `Variable` to scatter onto. + indices: Tensor containing indices as described above. + updates: Tensor of updates to apply to `ref`. + use_locking: Boolean indicating whether to lock the writing operation. + name: Optional scope name string. + + Returns: + Ref to `variable` after it has been modified. + + Raises: + ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are + not the same. 
+ """ + with ops.name_scope(name): + indices = ops.convert_to_tensor(indices, name="indices") + indices_shape = array_ops.shape(indices) + indices_dimensions = indices.get_shape().ndims + + if indices_dimensions is None: + raise ValueError("batch_gather does not allow indices with unknown " + "shape.") + + nd_indices = array_ops.expand_dims(indices, axis=-1) + nd_indices_list = [] + + # Scatter ND requires indices to have an additional dimension, in which the + # coordinates of the updated things are specified. For this to be adapted to + # the scatter_update with several leading dimensions, we simply make use of + # a tf.range for all the leading dimensions followed by concat of all the + # coordinates we created with the original indices. + + # For example if indices.shape = [2, 3, 4], we should generate the following + # indices for tf.compat.v1.scatter_nd_update: + # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]] + # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]] + # nd_indices[:, :, 2] = indices + for dimension in range(indices_dimensions - 1): + # In this loop we generate the following for the example (one for each + # iteration). + # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]] + # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]] + # This is done at every iteration with a tf.range over the size of the + # i-th dimension and using broadcasting over the desired shape. + dimension_size = indices_shape[dimension] + shape_to_broadcast = [1] * (indices_dimensions + 1) + shape_to_broadcast[dimension] = dimension_size + dimension_range = array_ops.reshape( + gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast) + if dimension_range.dtype.base_dtype != nd_indices.dtype: + dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype) + nd_indices_list.append( + dimension_range * array_ops.ones_like(nd_indices)) + # Add the original indices at the end, as described above, and concat. 
+ nd_indices_list.append(nd_indices) + final_indices = array_ops.concat(nd_indices_list, axis=-1) + return scatter_nd_update( + ref, final_indices, updates, use_locking=use_locking) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/weights_broadcast_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/weights_broadcast_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..908558dc3923c3c8ee97bfc42f466124e851947d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/weights_broadcast_ops.py @@ -0,0 +1,177 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Weight broadcasting operations. + +In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. This +file includes operations for those broadcasting rules. 
+""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sets +from tensorflow.python.util.tf_export import tf_export + + +def _has_valid_dims(weights_shape, values_shape): + with ops.name_scope( + None, "has_invalid_dims", (weights_shape, values_shape)) as scope: + values_shape_2d = array_ops.expand_dims(values_shape, -1) + valid_dims = array_ops.concat( + (values_shape_2d, array_ops.ones_like(values_shape_2d)), axis=1) + weights_shape_2d = array_ops.expand_dims(weights_shape, -1) + invalid_dims = sets.set_difference(weights_shape_2d, valid_dims) + num_invalid_dims = array_ops.size( + invalid_dims.values, name="num_invalid_dims") + return math_ops.equal(0, num_invalid_dims, name=scope) + + +def _has_valid_nonscalar_shape( + weights_rank, weights_shape, values_rank, values_shape): + with ops.name_scope( + None, "has_valid_nonscalar_shape", + (weights_rank, weights_shape, values_rank, values_shape)) as scope: + is_same_rank = math_ops.equal( + values_rank, weights_rank, name="is_same_rank") + return cond.cond( + is_same_rank, + lambda: _has_valid_dims(weights_shape, values_shape), + lambda: is_same_rank, + name=scope) + + +_ASSERT_BROADCASTABLE_ERROR_PREFIX = "weights can not be broadcast to values." + + +def assert_broadcastable(weights, values): + """Asserts `weights` can be broadcast to `values`. + + In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We + let weights be either scalar, or the same rank as the target values, with each + dimension either 1, or the same as the corresponding values dimension. + + Args: + weights: `Tensor` of weights. + values: `Tensor` of values to which weights are applied. 
+ + Returns: + `Operation` raising `InvalidArgumentError` if `weights` has incorrect shape. + `no_op` if static checks determine `weights` has correct shape. + + Raises: + ValueError: If static checks determine `weights` has incorrect shape. + """ + with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope: + with ops.name_scope(None, "weights", (weights,)) as weights_scope: + weights = ops.convert_to_tensor(weights, name=weights_scope) + weights_shape = array_ops.shape(weights, name="shape") + weights_rank = array_ops.rank(weights, name="rank") + weights_rank_static = tensor_util.constant_value(weights_rank) + + with ops.name_scope(None, "values", (values,)) as values_scope: + values = ops.convert_to_tensor(values, name=values_scope) + values_shape = array_ops.shape(values, name="shape") + values_rank = array_ops.rank(values, name="rank") + values_rank_static = tensor_util.constant_value(values_rank) + + # Try static checks. + if weights_rank_static is not None and values_rank_static is not None: + if weights_rank_static == 0: + return control_flow_ops.no_op(name="static_scalar_check_success") + if weights_rank_static != values_rank_static: + raise ValueError( + f"{_ASSERT_BROADCASTABLE_ERROR_PREFIX} values.rank=" + f"{values_rank_static}. weights.rank={weights_rank_static}. " + f"values.shape={values.shape}. weights.shape={weights.shape}. " + f"Received weights={weights}, values={values}") + weights_shape_static = tensor_util.constant_value(weights_shape) + values_shape_static = tensor_util.constant_value(values_shape) + if weights_shape_static is not None and values_shape_static is not None: + # Sanity check, this should always be true since we checked rank above. + ndims = len(values_shape_static) + assert ndims == len(weights_shape_static) + + for i in range(ndims): + if weights_shape_static[i] not in (1, values_shape_static[i]): + raise ValueError( + f"{_ASSERT_BROADCASTABLE_ERROR_PREFIX} Mismatch at dim {i}. 
" + f"values.shape={values_shape_static}, weights.shape=" + f"{weights_shape_static}. Received weights={weights}, " + f"values={values}") + return control_flow_ops.no_op(name="static_dims_check_success") + + # Dynamic checks. + is_scalar = math_ops.equal(0, weights_rank, name="is_scalar") + data = ( + _ASSERT_BROADCASTABLE_ERROR_PREFIX, + "weights.shape=", weights.name, weights_shape, + "values.shape=", values.name, values_shape, + "is_scalar=", is_scalar, + ) + is_valid_shape = cond.cond( + is_scalar, + lambda: is_scalar, + lambda: _has_valid_nonscalar_shape( # pylint: disable=g-long-lambda + weights_rank, weights_shape, values_rank, values_shape), + name="is_valid_shape") + return control_flow_assert.Assert(is_valid_shape, data, name=scope) + + +@tf_export("__internal__.ops.broadcast_weights", v1=[]) +def broadcast_weights(weights, values): + """Broadcast `weights` to the same shape as `values`. + + This returns a version of `weights` following the same broadcast rules as + `mul(weights, values)`, but limited to the weights shapes allowed by + `assert_broadcastable`. When computing a weighted average, use this function + to broadcast `weights` before summing them; e.g., + `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`. + + Args: + weights: `Tensor` whose shape is broadcastable to `values` according to the + rules of `assert_broadcastable`. + values: `Tensor` of any shape. + + Returns: + `weights` broadcast to `values` shape according to the rules of + `assert_broadcastable`. + """ + with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope: + values = ops.convert_to_tensor(values, name="values") + weights = ops.convert_to_tensor( + weights, dtype=values.dtype.base_dtype, name="weights") + + # Try static check for exact match. 
+ weights_shape = weights.get_shape() + values_shape = values.get_shape() + if (weights_shape.is_fully_defined() and + values_shape.is_fully_defined() and + weights_shape.is_compatible_with(values_shape)): + return weights + + # Skip the assert_broadcastable on TPU/GPU because asserts are not + # supported so it only causes unnecessary ops. Also skip it because it uses + # a DenseToDenseSetOperation op that is incompatible with the TPU/GPU when + # the shape(s) are dynamic. + if control_flow_ops.get_enclosing_xla_context() is not None: + return math_ops.multiply( + weights, array_ops.ones_like(values), name=scope) + with ops.control_dependencies((assert_broadcastable(weights, values),)): + return math_ops.multiply( + weights, array_ops.ones_like(values), name=scope)