diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..99b3577c2cc8265cea24492a020011a2f775dd87 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py @@ -0,0 +1,3 @@ +from .cond import cond +from .while_loop import while_loop +from .flex_attention import flex_attention, flex_attention_backward diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55f8be526580d178f5326520292eb33ec9864460 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/associative_scan.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/associative_scan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..199d8d7e1658e8ce3fbdbd0d2d68f4f44e7e2858 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/associative_scan.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1ef664648c934da11e1ac29fe3054703e9f5792 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc differ diff --git 
a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fe503ee091846c6f9094c2799ee0cac402d5407 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31343cfa15d81e7071e4162ce66eb04b6cfacab4 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/flex_attention.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/flex_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f2167979b8019e8b6111cc85c924eb327b37ed3 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/flex_attention.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29c43ea47239d4766eff3bdd1076c12938d99d22 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..126cec4f00891df73708307560f4477b10a06c1d Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f34eca8d77f1884ccec912f8ccbe7f895b14cc9 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bd399e3311de9e9dbedd0a120bbd028af71c881 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c7694d106e21354ced25482e16d83862e35d566 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0944a0d970134d0859c510896e8d0d1bb18a27f Binary files /dev/null and 
b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..360e0aa9a7a689fe34fc98465a932d6dba204d93 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f977354ab32cd1dbb9ab814bd19769b2807f27c Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/associative_scan.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/associative_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..0d88aa0db2c6da4427339069f7a3235d56c84d35 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/associative_scan.py @@ -0,0 +1,216 @@ +# mypy: allow-untyped-defs +import functools +import itertools +from typing import Callable, List + +import torch + +import torch._prims_common as utils +import torch._subclasses.functional_tensor + +import torch.utils._pytree as pytree + +from torch._C import DispatchKey +from torch._C._functorch import _add_batch_dim, get_unwrapped, maybe_get_bdim +from torch._higher_order_ops.utils import ( + _set_compilation_env, + autograd_not_implemented, + reenter_make_fx, + unique_graph_id, +) + +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode 
+from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + +aten = torch._ops.ops.aten + + +def wrap_combine_fn_flat(*args, combine_fn, spec, num_leaves): + assert len(args) == 2 * num_leaves + lhs = pytree.tree_unflatten(args[:num_leaves], spec) + rhs = pytree.tree_unflatten(args[num_leaves:], spec) + combined = combine_fn(lhs, rhs) + combined_leaves = pytree.tree_leaves(combined) + assert num_leaves == len(combined_leaves) + return combined_leaves + + +def associative_scan( + combine_fn: Callable[[pytree.PyTree, pytree.PyTree], pytree.PyTree], + input: pytree.PyTree, + dim: int, +) -> torch.Tensor: + r""" + Performs an inclusive scan with an associative pointwise combine function. + + .. warning:: + `torch.associative_scan` is a prototype feature in PyTorch. It currently + does not support autograd and you may run into miscompiles. + Read more about feature classification at: + https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype + + This operator requires runtime code generation and so requires support for + ``torch.compile``. Further, only CUDA device codegen is supported at the moment. + + Args: + combine_fn (Callable): A binary callable with type ``(Tensor, Tensor) -> Tensor``, + or if input is a pytree ``(pytree, pytree) -> pytree``. + This function must be pure, pointwise, and satisfy the associative property. + input (torch.Tensor): The input tensor, or nested pytree of tensors. + All inputs are expected to have the same shape. 
+ dim (int): the dimension to scan over + + + Example:: + + def add(x: torch.Tensor, y: torch.Tensor): + return x + y + + cumsum = associative_scan(add, x, dim) + + """ + assert callable(combine_fn), "combine_fn must be a callable, but got {combine_fn}" + assert isinstance(dim, int), "dim must be an int, but got {type(dim)}" + + if not torch._dynamo.is_compiling(): + with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit(): + return torch.compile(associative_scan, fullgraph=True)( + combine_fn, input, dim + ) + + leaves, spec = pytree.tree_flatten(input) + + assert len(leaves) >= 1, "expected at least 1 input leaf" + assert all( + isinstance(x, torch.Tensor) for x in leaves + ), "input leaves must be a Tensor" + shape = leaves[0].shape + ndim = len(shape) + dim = utils.canonicalize_dim(ndim, dim) + + for x in leaves[1:]: + assert x.shape == shape, "All input tensors must have the same shape" + + combine_fn = functools.partial( + wrap_combine_fn_flat, combine_fn=combine_fn, spec=spec, num_leaves=len(leaves) + ) + + result_flat = associative_scan_op(combine_fn, leaves, dim) + + return pytree.tree_unflatten(result_flat, spec) + + +associative_scan_op = HigherOrderOperator("associative_scan") + + +def trace_associative_scan( + proxy_mode, func_overload, combine_fn: Callable, input: List[torch.Tensor], dim: int +): + with disable_proxy_modes_tracing(): + sample_inputs = [ + torch.full((), False, dtype=x.dtype, device=x.device) + for x in itertools.chain(input, input) + ] + combine_graph = reenter_make_fx(combine_fn)(*sample_inputs) + + outputs = None + for node in combine_graph.graph.nodes: + if node.op == "output": + assert outputs is None + assert len(node.args) == 1 + outputs = node.args[0] + + assert outputs is not None + assert len(outputs) == len( + input + ), f"expected combine_fn to return {len(input)} results but got {len(outputs)}" + + for i, o in zip(input, outputs): + o_meta = o.meta["tensor_meta"] + assert o_meta.dtype == i.dtype, ( + 
f"combine_fn output type mismatch, expected {i.dtype} " + + f"but got {o_meta.dtype}" + ) + assert ( + o_meta.shape == () + ), f"combine_fn must return a scalar tensor but got shape {o_meta.shape}" + assert ( + o_meta.shape == () + ), f"combine_fn must return a scalar tensor but got shape {o_meta.shape}" + + _, combine_graph_name = unique_graph_id(proxy_mode, prefix="scan_combine_graph") + + proxy_mode.tracer.root.register_module(combine_graph_name, combine_graph) + + args = (combine_graph, input, dim) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, name="associative_scan" + ) + + with disable_proxy_modes_tracing(): + out = [aten.clone(x) for x in input] + + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@associative_scan_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def associative_scan_op_dense(combine_fn, input, dim): + raise NotImplementedError("associative_scan is not implemented for eager") + + +associative_scan_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(associative_scan_op, deferred_error=True) +) + + +@associative_scan_op.py_impl(ProxyTorchDispatchMode) +def associative_scan_proxy_mode(mode, combine_fn, input, dim): + if mode.enable_tracing: + return trace_associative_scan(mode, associative_scan_op, combine_fn, input, dim) + else: + return associative_scan_op(mode, associative_scan_op, combine_fn, input, dim) + + +@associative_scan_op.py_impl(FakeTensorMode) +def assoiciative_scan_fake_tensor_mode(mode, combine_fn, input, dim): + with mode: + return [x.clone() for x in input] + + +@associative_scan_op.py_functionalize_impl +def associative_scan_functionalize(ctx, combine_fn, input, dim): + unwrapped_input = ctx.unwrap_tensors(input) + with ctx.redispatch_to_next() as m: + ret = associative_scan_op(combine_fn, unwrapped_input, dim) + return ctx.wrap_tensors(ret) + + 
+@associative_scan_op.py_impl(torch._C._functorch.TransformType.Vmap) +def associative_scan_batch_rule(interpreter, input, dim, combine_fn): + input_ = [get_unwrapped(x) for x in input] + input_bdims = [maybe_get_bdim(x) for x in input] + + batch_size = None + for inp, bdim in zip(input, input_bdims): + if bdim is not None: + batch_size = get_unwrapped(inp).shape[bdim] + + assert batch_size + input_unwrapped = [] + for x, bdim in zip(input, input_bdims): + unwrap = get_unwrapped(x) + if dim is None: + unwrap = unwrap.unsqueeze(0).expand(batch_size, *x.shape) + else: + unwrap = unwrap.movedim(bdim, 0) + input_unwrapped.append(unwrap) + + res = associative_scan_op(combine_fn, input_unwrapped, dim + 1) + lvl = interpreter.level() + return [_add_batch_dim(x, 0, lvl) for x in res] diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py new file mode 100644 index 0000000000000000000000000000000000000000..189f746b77a01074bf4dd8a1b40e95d5ab40fdbe --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py @@ -0,0 +1,262 @@ +# mypy: allow-untyped-defs +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._prims_common import clone_preserve_strides +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + + +# NOTE: [auto-functionalizing custom ops] +# Users may wish to torch.compile custom ops that mutate their inputs. +# torch.compile will automatically support this op without anyone needing +# to provide a functionalization kernel for it. Here's how. 
+# +# Let's say we have a hypothetical mylib::sin_(Tensor(a!) x) -> () +# op. First, when FakeTensor sees this op: +# - If the schema says it returns nothing, we can generate a trivial +# FakeTensor rule for it (that returns nothing). +# - Otherwise, the user needs to provide a FakeTensor impl (fake impl) +# +# Next, when Python FunctionalTensor sees the op, it will functionalize +# it by emitting a call to an auto_functionalize(op, ["x"], {"x": ...}) +# HOP and replacing the mutated inputs with corresponding outputs of this HOP. +# This HOP effectively runs the functional version of the op when +# called: it clones inputs that will be mutated, runs the op, and +# then returns (output, Tensors with the new values) + + +class AutoFunctionalized(HigherOrderOperator): + """auto_functionalized(_mutable_op, **kwargs) + + This HOP runs a "functional" version of _mutable_op. + + Concretely, it looks at all the arguments that are mutable through + _mutable_op's operator schema, clones those kwargs, runs + `out = _mutable_op(**kwargs)` with the cloned values, and then returns the + operator output concatenated with the cloned values that were mutated. + + We have some restrictions on `_mutable_op`. + See `can_auto_functionalize` for the restrictions. We can likely lift + many of these if users request it. + + The reason why _mutable_op is prefixed with an + underscore is to prevent collisions with kwarg names in **kwargs. 
+ """ + + def __init__(self): + super().__init__("auto_functionalized") + + def __call__( + self, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], + ) -> Tuple[Any, Tuple[Tensor, ...]]: + assert can_auto_functionalize(_mutable_op) + assert isinstance(kwargs, dict) + return super().__call__(_mutable_op, **kwargs) + + +auto_functionalized = AutoFunctionalized() + + +def can_auto_functionalize(op: torch._ops.OperatorBase) -> bool: + if not isinstance(op, torch._ops.OpOverload): + return False + + if torch._library.utils.is_builtin(op): + # We control the built-ins. These may (in rare cases) + # do input metadata mutation (which we have banned on custom ops) + return False + schema = op._schema + if not schema.is_mutable: + return False + schema = op._schema + + for arg in schema.arguments: + if arg.alias_info is None: + continue + if not arg.alias_info.is_write: + continue + if type(arg.type) is torch.TensorType: + continue + if ( + type(arg.type) is torch.OptionalType + and type(arg.type.getElementType()) is torch.TensorType + ): + continue + # Not yet supported: other Tensor types. This includes things like + # Tensor[], Tensor?[], Tensor[]?. + return False + + # The returns must not alias anything + for ret in schema.returns: + if ret.alias_info is None and type(ret.type) is torch.TensorType: + continue + # Not yet supported: List[Tensor] return. 
+ return False + return True + + +@auto_functionalized.py_impl(DispatchKey.CompositeExplicitAutograd) +def auto_functionalized_dense( + _mutable_op: torch._ops.OpOverload, + _only_clone_these_tensors: Optional[Tuple[str, ...]] = None, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + new_kwargs = dict(**kwargs) + result = [] + + _mutable_args_names = get_mutable_arg_names(_mutable_op) + for name in _mutable_args_names: + if ( + _only_clone_these_tensors is not None + and name not in _only_clone_these_tensors + ): + new_kwargs[name] = kwargs[name] + else: + new_kwargs[name] = ( + clone_preserve_strides(kwargs[name]) + if kwargs[name] is not None + else None + ) + result.append(new_kwargs[name]) + out = _mutable_op(**new_kwargs) + + if isinstance(out, tuple): + return (*out, *result) # type: ignore[return-value] + else: + return (out, *result) # type: ignore[return-value] + + +@auto_functionalized.py_impl(FakeTensorMode) +def auto_functionalized_fake( + mode, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + with mode: + result = auto_functionalized_dense(_mutable_op, **kwargs) + return result + + +@auto_functionalized.py_impl(ProxyTorchDispatchMode) +def auto_functionalized_proxy( + mode, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + if not mode.enable_tracing: + return auto_functionalized(_mutable_op, **kwargs) + + with disable_proxy_modes_tracing(): + out = auto_functionalized(_mutable_op, **kwargs) + + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + out_proxy = mode.tracer.create_proxy( + "call_function", + auto_functionalized, + (_mutable_op,), + proxy_kwargs, + ) + result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + return result + + +auto_functionalized.fallthrough(DispatchKey.AutogradCPU) +auto_functionalized.fallthrough(DispatchKey.AutogradCUDA) + + +def get_mutable_arg_names(op: 
torch._ops.OpOverload) -> List[str]: + """ + Returns the list of argument names that get mutated according to the + schema. + """ + mutable_args_names = [ + arg.name + for arg in op._schema.arguments + if arg.alias_info is not None and arg.alias_info.is_write + ] + return mutable_args_names + + +def do_auto_functionalize( + op: torch._ops.OpOverload, args: Tuple[Any, ...], kwargs: Dict[str, Any] +) -> Any: + """Functionalizes a call to op(*args, **kwargs) by emitting a call to + `outs = auto_functionalized(op, normalized_kwargs)` + and replacing the mutated (args, kwargs) with the corresponding outputs. + + The normalized_kwargs are just the (args, kwargs), but all in kwarg form. + This makes handling easier for the auto_functionalized HOP. + """ + from torch._subclasses.functional_tensor import PythonFunctionalizeAPI + + ctx = PythonFunctionalizeAPI() + + # All of the (args, kwargs), but all as kwargs. The names for the + # args come from the schema. This makes it easier for us to work with them. 
+ normalized_kwargs = {} + schema = op._schema + for idx, arg in enumerate(schema.arguments): + # NB: torch_dispatch kwargs are the args defined as kwarg-only in the schema + if arg.name in kwargs: + normalized_kwargs[arg.name] = kwargs[arg.name] + elif idx < len(args): + # if its out of bounds we don't need to do anything + # as it means the the optional arg was passed with its default + # value + normalized_kwargs[arg.name] = args[idx] + else: + normalized_kwargs[arg.name] = arg.default_value + + unwrapped_kwargs = ctx.unwrap_tensors(normalized_kwargs) # type: ignore[arg-type] + with ctx.redispatch_to_next(): + unwrapped_outs = auto_functionalized( + op, **unwrapped_kwargs # type: ignore[arg-type] + ) + + # List of the name of args that get mutated (according to the schema) + mutable_args_names = get_mutable_arg_names(op) + + unwrapped_actual_out: Union[Any, Tuple[Any]] = unwrapped_outs[ + : -len(mutable_args_names) + ] + unwrapped_mutable_out = unwrapped_outs[-len(mutable_args_names) :] + + if len(op._schema.returns) == 0: + assert unwrapped_actual_out[0] is None + unwrapped_actual_out = None + elif len(op._schema.returns) == 1: + assert len(unwrapped_actual_out) == 1 + unwrapped_actual_out = unwrapped_actual_out[0] + else: + assert len(unwrapped_actual_out) == len(op._schema.returns) + + for name, unwrapped_out in zip(mutable_args_names, unwrapped_mutable_out): + # Can be None if input was `Tensor(a!)?` + if unwrapped_out is None: + continue + assert isinstance(unwrapped_out, torch.Tensor) + orig_arg = normalized_kwargs[name] + ctx.replace(orig_arg, unwrapped_out) + ctx.commit_update(orig_arg) + ctx.sync(orig_arg) + + return ctx.wrap_tensors(unwrapped_actual_out) # type: ignore[arg-type] + + +@auto_functionalized.py_functionalize_impl +def auto_functionalized_func(ctx, _mutable_op, **kwargs): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + with ctx.redispatch_to_next(): + result = auto_functionalized(_mutable_op, **unwrapped_kwargs) + return 
ctx.wrap_tensors(result) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py new file mode 100644 index 0000000000000000000000000000000000000000..f4fe64d67f0b1ad35ee9e33cded01cd0b5d75285 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py @@ -0,0 +1,362 @@ +# mypy: allow-untyped-defs +import contextlib + +import torch +import torch._subclasses.functional_tensor + +import torch.utils._pytree as pytree + +from torch._C import DispatchKey +from torch._C._functorch import ( + _add_batch_dim, + get_unwrapped, + is_batchedtensor, + maybe_get_bdim, +) +from torch._functorch.utils import exposed_in +from torch._guards import detect_fake_mode + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + _set_compilation_env, + autograd_not_implemented, + reenter_make_fx, + unique_graph_id, + UnsupportedAliasMutationException, +) + +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + _temp_remove_pre_dispatch_torch_function_mode, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.utils._python_dispatch import _get_current_dispatch_mode + + +@exposed_in("torch") +def cond(pred, true_fn, false_fn, operands): + r""" + Conditionally applies `true_fn` or `false_fn`. + + .. warning:: + `torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types and + doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch. + Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype + + `cond` is structured control flow operator. 
That is, it is like a Python if-statement, + but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be + capturable using torch.compile and torch.export. + + Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following:: + + def cond(pred, true_branch, false_branch, operands): + if pred: + return true_branch(*operands) + else: + return false_branch(*operands) + + Args: + pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element, + indicating which branch function to apply. + + true_fn (Callable): A callable function (a -> b) that is within the + scope that is being traced. + + false_fn (Callable): A callable function (a -> b) that is within the + scope that is being traced. The true branch and false branch must + have consistent input and outputs, meaning the inputs have to be + the same, and the outputs have to be the same type and shape. + + operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the true/false functions. + + Example:: + + def true_fn(x: torch.Tensor): + return x.cos() + def false_fn(x: torch.Tensor): + return x.sin() + return cond(x.shape[0] > 4, true_fn, false_fn, (x,)) + + Restrictions: + - The conditional statement (aka `pred`) must meet one of the following constraints: + + - It's a `torch.Tensor` with only one element, and torch.bool dtype + + - It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10` + + - The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints: + + - The function signature must match with operands. + + - The function must return a tensor with the same metadata, e.g. shape, + dtype, etc. + + - The function cannot have in-place mutations on inputs or global variables. + (Note: in-place tensor operations such as `add_` for intermediate results + are allowed in a branch) + + .. 
warning:: + Temporal Limitations: + + - `cond` only supports **inference** right now. Autograd will be supported in the future. + + - The **output** of branches must be a **single Tensor**. Pytree of tensors will be supported in the future. + + """ + + if torch.compiler.is_dynamo_compiling(): + return cond_op(pred, true_fn, false_fn, operands) + + def _validate_input(pred, true_fn, false_fn, operands): + if not isinstance(pred, (bool, torch.Tensor, torch.SymBool)): + raise RuntimeError(f"Expected pred to be bool or tensor, but got {pred}.") + + if isinstance(pred, torch.Tensor) and pred.numel() != 1: + raise RuntimeError( + f"Expected pred to be bool or single-element tensor, but got {pred}." + ) + + if not callable(true_fn) or not callable(false_fn): + raise RuntimeError("Expect both branches to be callbale.") + + if not isinstance(operands, (tuple, list)) or pytree.tree_any( + lambda t: not isinstance(t, torch.Tensor), operands + ): + raise RuntimeError( + "Expect operands to be a tuple of possibly nested dict/list/tuple that only" + f"consists of tensor leaves, but got {operands}." + ) + + _validate_input(pred, true_fn, false_fn, operands) + + if not torch._dynamo.is_dynamo_supported(): + raise RuntimeError("torch.cond requires dynamo support.") + + with _set_compilation_env(): + with torch._dynamo.utils.disable_cache_limit(): + with _temp_remove_pre_dispatch_torch_function_mode(): + return torch.compile(cond_op, backend="eager", fullgraph=True)( + pred, true_fn, false_fn, operands + ) + + +""" +We're going to define a `cond_op` operation. +In order to do this, we need implementations for each of the dispatch keys. 
+""" +cond_op = HigherOrderOperator("cond") + + +def trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands): + assert isinstance( + operands, (list, tuple) + ), "Cond operands must be a list or tuple of tensors" + assert all( + isinstance(o, torch.Tensor) for o in operands + ), "Cond operands must be a list of tensors" + + true_graph = reenter_make_fx(true_fn)(*operands) + false_graph = reenter_make_fx(false_fn)(*operands) + + true_outs = [] + false_outs = [] + for node in true_graph.graph.nodes: + if node.op == "output": + true_outs.extend(node.args) + + for node in false_graph.graph.nodes: + if node.op == "output": + false_outs.extend(node.args) + + flat_true_outs = pytree.arg_tree_leaves(*true_outs) + flat_false_outs = pytree.arg_tree_leaves(*false_outs) + if len(flat_true_outs) != len(flat_false_outs): + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected to return same number of outputs but got:" + f"\n {true_fn.__name__} returns {len(flat_true_outs)} item(s)" + f"\n {false_fn.__name__} returns {len(flat_false_outs)} item(s)" + ) + + for i in range(0, len(flat_true_outs)): + true_out = flat_true_outs[i] + false_out = flat_false_outs[i] + if true_out.meta["tensor_meta"] != false_out.meta["tensor_meta"]: + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected each tensor to have same metadata but got:" + f"\n {true_fn.__name__} returns {true_out.meta['tensor_meta']}" + f"\n {false_fn.__name__} returns {false_out.meta['tensor_meta']}" + ) + + i, true_name = unique_graph_id(proxy_mode, prefix="true_graph") + + false_name = f"false_graph_{i}" + assert not hasattr(proxy_mode.tracer.root, false_name) + + proxy_mode.tracer.root.register_module(true_name, true_graph) + proxy_mode.tracer.root.register_module(false_name, false_graph) + + args = (pred, true_graph, false_graph, operands) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) + + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, 
proxy_args, {}, name="conditional" + ) + + # At this point, we're *guaranteed* that whether an output came from the + # true or false branch is indistinguishable. So, as this is just for tracing + # purposes, choose the true branch. + + # TODO: the unbacked symbol allocations MUST NOT leak out, if you want to + # support this we need to arrange for the reenter_make_fx unbacked SymInts + # to be used, AND we need to arrange for some sort of unification between + # the two branches (but not really unification; e.g., if one branch + # returns [u0] and the other returns [5] this is OK but you MUST NOT + # conclude the result is 5. Also if one branch returns [3] and another + # branch returns [5] you can make it work by immediately allocating a new + # unbacked SymInt here). + ignore_fresh_unbacked = contextlib.nullcontext() + if (fake_mode := detect_fake_mode()) and fake_mode.shape_env: + ignore_fresh_unbacked = fake_mode.shape_env.ignore_fresh_unbacked_symbols() + + # TODO: Uhh.... it shouldn't matter, but changing this to true_fn results in + # a FakeTensorMode error : + # `Current active mode not registered` + # TODO Sometimes the operands are not completely FakeTensor, something seems went wrong in + # dynamo? Because of that it runs real computation sometimes and re-triggering downstream dispatch keys. 
+ with ignore_fresh_unbacked: + out = false_fn(*operands) + + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@cond_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def cond_op_dense(pred, true_fn, false_fn, operands): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + if pred: + return true_fn(*operands) + else: + return false_fn(*operands) + + +cond_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(cond_op, deferred_error=True) +) + + +@cond_op.py_impl(ProxyTorchDispatchMode) +def inner(mode, pred, true_fn, false_fn, operands): + if mode.enable_tracing: + return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands) + else: + return cond_op(pred, true_fn, false_fn, operands) + + +@cond_op.py_impl(FakeTensorMode) +def cond_fake_tensor_mode(mode, pred, true_fn, false_fn, operands): + # Ignore here, because if you've gotten here but you're not manually + # tracing the inner graphs, that means that you intend to reuse the graph + # directly. Which means the old unbacked symbol bindings are appropriate. + # This strategy will not work if unbacked symbols can escape. 
+ ignore_fresh_unbacked = contextlib.nullcontext() + if mode.shape_env: + ignore_fresh_unbacked = mode.shape_env.ignore_fresh_unbacked_symbols() + + with mode, ignore_fresh_unbacked: + true_outs = true_fn(*operands) + flat_true_outs = pytree.tree_leaves(true_outs) + flat_false_outs = pytree.tree_leaves(false_fn(*operands)) + if len(flat_true_outs) != len(flat_false_outs): + raise RuntimeError("Unmatched number of outputs from cond() branches.") + + for true_out, false_out in zip(flat_true_outs, flat_false_outs): + true_meta = _extract_tensor_metadata(true_out) + false_meta = _extract_tensor_metadata(false_out) + if true_meta != false_meta: + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected each tensor to have same metadata but got:" + f"\n {true_fn.__name__} returns {true_meta}" + f"\n {false_fn.__name__} returns {false_meta}" + ) + return true_outs + + +@cond_op.py_functionalize_impl +def cond_func(ctx, pred, true_fn, false_fn, inputs): + unwrapped_inputs = ctx.unwrap_tensors(inputs) + unwrapped_pred = ctx.unwrap_tensors(pred) + with ctx.redispatch_to_next() as m: + functional_true = ctx.functionalize(true_fn) + functional_false = ctx.functionalize(false_fn) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + for branch in [functional_true, functional_false]: + if _has_potential_branch_input_mutation( + branch, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + "One of torch.cond branch might be modifying the input!" + ) + for branch in [true_fn, false_fn]: + if _has_potential_branch_input_alias( + branch, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + "One of torch.cond branch might be aliasing the input!" 
+ ) + + cond_return = cond_op( + unwrapped_pred, functional_true, functional_false, unwrapped_inputs + ) + return ctx.wrap_tensors(cond_return) + + +@cond_op.py_impl(torch._C._functorch.TransformType.Vmap) +def cond_batch_rule(interpreter, pred, true_fn, false_fn, inputs): + assert isinstance( + inputs, (list, tuple) + ), "Cond inputs must be a list or tuple of tensors" + assert all( + isinstance(i, torch.Tensor) for i in inputs + ), "Cond inputs must be a list of tensors" + + pred_ = get_unwrapped(pred) if is_batchedtensor(pred) else pred + + # unbatched tensors are not vmapped + tensors, in_dims = zip( + *[ + (get_unwrapped(t), maybe_get_bdim(t)) if is_batchedtensor(t) else (t, None) + for t in inputs + ] + ) + + if is_batchedtensor(pred): + # prepend "pred" and vmap everything + tensors = (pred_,) + tensors + in_dims = (0,) + in_dims + + def fn(p, *args): + t = true_fn(*args) + f = false_fn(*args) + return torch.where(p, t[0], f[0]) + + with interpreter.lower(): + result = torch.vmap(fn, in_dims=in_dims)(*tensors) + + else: + # predicate is known at this stage and it is a boolean expression or a + # tensor with one element. 
+ true_fn = torch.vmap(true_fn, in_dims=in_dims) + false_fn = torch.vmap(false_fn, in_dims=in_dims) + + with interpreter.lower(): + result = cond_op(pred, true_fn, false_fn, tensors) + + if not isinstance(result, tuple): + result = (result,) + lvl = interpreter.level() + return tuple([_add_batch_dim(r, 0, lvl) for r in result]) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py new file mode 100644 index 0000000000000000000000000000000000000000..a8da01fe06ecd42e46892280f2db27cc076a3aac --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py @@ -0,0 +1,225 @@ +# mypy: allow-untyped-defs +from enum import Enum +from typing import Any, Dict, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + + +class _EffectType(Enum): + ORDERED = "Ordered" + + +SIDE_EFFECTS: Dict[torch._ops.OpOverload, _EffectType] = { + torch.ops.aten._print.default: _EffectType.ORDERED, +} + + +def _register_effectful_op(op: torch._ops.OpOverload, effect: _EffectType): + assert isinstance(op, torch._ops.OpOverload) and not has_aliasing(op) + if op in SIDE_EFFECTS and SIDE_EFFECTS[op] != effect: + raise RuntimeError( + f"Already registered effect type {SIDE_EFFECTS[op]} to op {op}, " + f"trying to register a different effect type {effect}." + ) + SIDE_EFFECTS[op] = effect + + +class WithEffects(HigherOrderOperator): + """ + with_effects(token, op, args, kwargs) -> (new_token, op_results) + + This HOP helps ensure ordering between side effectful ops like prints or ops + using torchbind objects. 
This is needed to ensure a traced graph from + AOTAutograd is functional so that future optimization passes do not reorder + these operators. This is done through threading "effect tokens" through the + graph to enforce data dependence between side effectful ops. + + The tokens are basically dummy values (torch.tensor([])). We create a token + per "effect type", which are enumerated in the _EffectType enum. + """ + + def __init__(self): + super().__init__("with_effects") + + def __call__( + self, + token, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], + ) -> Tuple[Any, ...]: + assert isinstance(op, torch._ops.OpOverload) + assert not has_aliasing(op), "Ops with aliasing is not supported" + assert has_effects(op, args, kwargs) + assert isinstance(kwargs, dict) + return super().__call__(token, op, *args, **kwargs) + + +with_effects = WithEffects() + + +def has_aliasing(op: torch._ops.OpOverload): + for arg in op._schema.arguments: + if arg.alias_info is not None: + return True + for arg in op._schema.returns: + if arg.alias_info is not None: + return True + return False + + +def has_effects(op, args, kwargs) -> bool: + # Skip over the profiler's RecordFunction as they should not show up in the graph + _skip_ops = {torch.ops.profiler._record_function_exit._RecordFunction} + if op in _skip_ops: + return False + + return ( + isinstance(op, torch._ops.OpOverload) + and not has_aliasing(op) + and get_effect_key(op, args, kwargs) is not None + ) + + +def get_effect_key(op, args, kwargs) -> Optional[_EffectType]: + if op in SIDE_EFFECTS: + return SIDE_EFFECTS[op] + + for arg in args: + if isinstance(arg, torch.ScriptObject): + # Add it to the table so that next time we see the same op we don't + # have to parse through the args again + SIDE_EFFECTS[op] = _EffectType.ORDERED + return _EffectType.ORDERED + + return None + + +@with_effects.py_impl(DispatchKey.CompositeExplicitAutograd) +def with_effects_dense( + token: torch.Tensor, + op: 
torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + out = op(*args, **kwargs) + new_token = torch.tensor([]) + if isinstance(out, tuple): + return (new_token, *out) + return (new_token, out) + + +@with_effects.py_impl(FakeTensorMode) +def with_effects_fake( + mode, + token: torch.Tensor, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + with mode: + result = with_effects_dense(token, op, *args, **kwargs) + return result + + +@with_effects.py_impl(ProxyTorchDispatchMode) +def with_effects_proxy( + mode, + token: torch.Tensor, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + if not mode.enable_tracing: + return with_effects(token, op, *args, **kwargs) + + with disable_proxy_modes_tracing(): + out = with_effects(token, op, *args, **kwargs) + + proxy_token = mode.tracer.unwrap_proxy(token) + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + + out_proxy = mode.tracer.create_proxy( + "call_function", + with_effects, + (proxy_token, op, *proxy_args), + proxy_kwargs, + ) + result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + return result + + +with_effects.fallthrough(DispatchKey.AutogradCPU) +with_effects.fallthrough(DispatchKey.AutogradCUDA) + + +def handle_effects( + allow_token_discovery: bool, + tokens: Dict[_EffectType, torch.Tensor], + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], +) -> Any: + """ + Args: + allow_token_discovery: Whether or not we are discovering tokens. If this + is true, we will create a token for every side effect type seen that + does not have a token assigned yet. If this is false, the tokens + should've all been created ahead of time, so we will error if there is + no token mapping to every effect type. 
+ + tokens: Map of effect type to tokens. This is to chain operators of the + same effects together so that they do not get reordered in later + optimization passes. + """ + + # Get a token. We can't do `tokens.get(op, torch.tensor([]))` because + # this will create an empty tensor during proxy mode tracing if the token + # doesn't exist. But the tokens should always exist during proxy mode tracing. + key = get_effect_key(op, args, kwargs) + assert key is not None + if key not in tokens: + assert ( + allow_token_discovery + ), f"Could not find a token for effect {key} which came from the function {op}" + tokens[key] = torch.tensor([]) + token = tokens[key] + + from torch._subclasses.functional_tensor import PythonFunctionalizeAPI + + ctx = PythonFunctionalizeAPI() + + unwrapped_token = ctx.unwrap_tensors([token])[0] # type: ignore[arg-type] + unwrapped_args = ctx.unwrap_tensors(args) # type: ignore[arg-type] + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) # type: ignore[arg-type] + with ctx.redispatch_to_next(): + (new_token, *unwrapped_outs) = with_effects( + unwrapped_token, op, *unwrapped_args, **unwrapped_kwargs # type: ignore[arg-type] + ) + + if len(op._schema.returns) == 0: + assert unwrapped_outs[0] is None + unwrapped_outs = None # type: ignore[assignment] + elif len(op._schema.returns) == 1: + assert len(unwrapped_outs) == 1 + unwrapped_outs = unwrapped_outs[0] + else: + assert len(unwrapped_outs) == len(op._schema.returns) + + # Add the newly created token into the tokens map for a following call to + # use this token. 
+ wrapped_token = ctx.wrap_tensors(new_token) + assert isinstance(wrapped_token, torch.Tensor) + tokens[key] = wrapped_token + + return ctx.wrap_tensors(unwrapped_outs) # type: ignore[arg-type] diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/flex_attention.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/flex_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..c2efa3b48b7fd8929ae6e1a85f0ab9c09a3a2872 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/flex_attention.py @@ -0,0 +1,681 @@ +# mypy: allow-untyped-defs +from typing import Any, Callable, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch._C import DispatchKey +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_mutation, + autograd_not_implemented, + reenter_make_fx, + UnsupportedAliasMutationException, +) +from torch._ops import HigherOrderOperator +from torch._subclasses import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + make_fx, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.fx.graph_module import GraphModule + +from torch.overrides import TorchFunctionMode + + +def transform_getitem_args(x: torch.Tensor, index_args) -> Tuple[Any, ...]: + if isinstance(index_args, tuple): + return (x, list(index_args)) + elif not isinstance(index_args, (list, tuple)): + return (x, [index_args]) + return (x, index_args) + + +class TransformGetItemToIndex(TorchFunctionMode): + # This is needed since we want to support calling + # A[q_idx], where q_idx is a scalar tensor in score_mod. + # Today, when q_idx is a scalar tensor, we implicitly convert it to a python + # scalar and create a view. We do not want that behavior in this case, so we + # use this torchfunctionmode to override that behavior for score_mod + # wherever we're running it. 
+ def __torch_function__(self, func, types, args, kwargs=None): + if func == torch.Tensor.__getitem__: + return torch.ops.aten.index(*transform_getitem_args(*args)) + return func(*args, **(kwargs or {})) + + +class FlexAttentionHOP(HigherOrderOperator): + def __init__(self): + super().__init__("flex_attention") + + def __call__( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if not all(isinstance(buf, torch.Tensor) for buf in other_buffers): + raise RuntimeError("Other buffers must be tensors.") + return super().__call__(query, key, value, score_mod, *other_buffers) + + +flex_attention = FlexAttentionHOP() +flex_attention.__module__ = "torch.ops.higher_order" + + +class FlexAttentionBackwardHOP(HigherOrderOperator): + def __init__(self): + super().__init__("flex_attention_backward") + + def __call__( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + grad_out: torch.Tensor, + fw_graph: Union[Callable, GraphModule], + joint_graph: GraphModule, + *other_buffers: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + if not all(isinstance(buf, torch.Tensor) for buf in other_buffers): + raise RuntimeError("Other buffers must be tensors.") + return super().__call__( + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + + +flex_attention_backward = FlexAttentionBackwardHOP() +flex_attention_backward.__module__ = "torch.ops.higher_order" + + +def math_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Eager implementation + + This implementation uses vmap to vectorize the score_mod function over the batch, head, m, and n dimensions. 
+ We then apply the vectorized score_mod function to the scores matrix. Each wrap of vmap applies one of the + batch, head, m, or n dimensions. We need to apply vmap 4 times to vectorized over all 4 dimensions. + + Args: + query: The query tensor + key: The key tensor + value: The value tensor + score_mod: The score_mod function + other_buffers: Other buffers that are passed to the score_mod function + """ + working_precision = torch.float64 if query.dtype == torch.float64 else torch.float32 + + scores = (query @ key.transpose(-2, -1)).to(dtype=working_precision) + + b = torch.arange(0, scores.size(0), device=scores.device) + h = torch.arange(0, scores.size(1), device=scores.device) + m = torch.arange(0, scores.size(2), device=scores.device) + n = torch.arange(0, scores.size(3), device=scores.device) + + in_dim_buffers = (None,) * len(other_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, None, None, None, 0) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, None, None, 0, None) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers) + + # todo: We wouldn't need these overrides in this file if Dynamo always did the + # rewriting. 
+ with TransformGetItemToIndex(): + scores = score_mod(scores, b, h, m, n, *other_buffers).to(working_precision) + + # TODO Unconditionally return logsumexp for backwards + # if any(t.requires_grad for t in (query, key, value)): + logsumexp = scores.logsumexp(dim=-1) + + scores = scores.softmax(dim=-1) + + return scores.to(query.dtype) @ value, logsumexp + + +@flex_attention.py_impl(DispatchKey.CompositeExplicitAutograd) +def sdpa_dense( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + out, lse = math_attention(query, key, value, score_mod, *other_buffers) + out = out.contiguous() + return out, lse + + +def trace_flex_attention( + proxy_mode: ProxyTorchDispatchMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Traces the flex_attention operator with the given score_mod function and other_buffers. + + Trace SDPA will call make_fx with "fake" example vals and then trace the score_mod function + This will produce a GraphModule that will be stored on the root tracer as "sdpa_score". We + access this graph module in inductor to inline the score_mod function to the triton template. 
+ """ + example_out = flex_attention(query, key, value, score_mod, *other_buffers) + example_vals = [ + torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad) + ] + [torch.zeros((), dtype=torch.int) for _ in range(4)] + with TransformGetItemToIndex(): + score_graph = reenter_make_fx(score_mod)(*example_vals, *other_buffers) + qualname = proxy_mode.tracer.get_fresh_qualname("sdpa_score") + proxy_mode.tracer.root.register_module(qualname, score_graph) + node_args = (query, key, value, score_graph, *other_buffers) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", flex_attention, proxy_args, {} + ) + return track_tensor_tree( + example_out, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + +@flex_attention.py_impl(ProxyTorchDispatchMode) +def flex_attention_proxy_torch_dispatch_mode( + mode: ProxyTorchDispatchMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + assert mode is not None, "Mode should always be enabled for python fallback key" + if mode.enable_tracing: + return trace_flex_attention(mode, query, key, value, score_mod, *other_buffers) + else: + return flex_attention(query, key, value, score_mod, *other_buffers) + + +@flex_attention.py_functionalize_impl +def flex_attention_functionalize( + ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Defines the functionalization rules for the flex_attention operator. + + Write now we are unwrapping each tensor and then redispatching to the next, however we want to + guard against any mutations in the score_mod function, to the other_buffers since those + are free variables. 
+ """ + query_unwrapped = ctx.unwrap_tensors(query) + key_unwrapped = ctx.unwrap_tensors(key) + value_unwrapped = ctx.unwrap_tensors(value) + other_buffers_unwrapped = ctx.unwrap_tensors(other_buffers) + + # Appease the mypy overlords + assert isinstance(query_unwrapped, torch.Tensor) + assert isinstance(key_unwrapped, torch.Tensor) + assert isinstance(value_unwrapped, torch.Tensor) + assert isinstance(other_buffers_unwrapped, tuple) + assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped) + + example_vals = ( + [torch.zeros((), dtype=query.dtype)] + + [torch.zeros((), dtype=torch.int) for _ in range(4)] + + list(other_buffers_unwrapped) + ) + with ctx.redispatch_to_next() as m: + functional_score_mod = ctx.functionalize(score_mod) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + with TransformGetItemToIndex(): + mutates = _has_potential_branch_input_mutation( + functional_score_mod, example_vals, pre_dispatch + ) + # The only care about mutations of existing buffers since we can't replay these. 
+ # However, we can just error if anything is detected + if mutates: + raise UnsupportedAliasMutationException("Mutations detected in score_mod") + + out = flex_attention( + query_unwrapped, + key_unwrapped, + value_unwrapped, + functional_score_mod, + *other_buffers_unwrapped, + ) + return ctx.wrap_tensors(out) # type: ignore[return-value, arg-type] + + +@flex_attention.py_impl(FakeTensorMode) +def flex_attention_fake_tensor_mode( + mode: FakeTensorMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: Tuple[torch.Tensor, ...], +) -> Tuple[torch.Tensor, torch.Tensor]: + with mode: + batch_size, num_heads, seq_len_q, _ = query.shape + logsumexp = query.new_empty( + batch_size, num_heads, seq_len_q, dtype=torch.float32 + ) + return torch.empty_like(query, memory_format=torch.contiguous_format), logsumexp + + +# ---------------------------- Autograd Implementation ---------------------------- +def create_fw_bw_graph(score_mod, index_values, other_buffers): + # See Note:[HOP create fw_bw graph] + + # All of these imports need to be here in order to avoid circular dependencies + from torch._dispatch.python import suspend_functionalization + from torch._functorch.aot_autograd import AOTConfig, create_joint + from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode + + from torch._subclasses.functional_tensor import disable_functional_mode + from torch.fx.experimental.proxy_tensor import disable_proxy_modes_tracing + + dummy_aot_config = AOTConfig( + fw_compiler=None, # type: ignore[arg-type] + bw_compiler=None, # type: ignore[arg-type] + partition_fn=None, # type: ignore[arg-type] + decompositions={}, + num_params_buffers=0, + aot_id=0, + keep_inference_input_mutations=False, + ) + + with suspend_functionalization(), disable_functional_mode(): + with disable_proxy_modes_tracing(): + + def _from_fun(t): + return torch.empty_strided( + t.size(), + t.stride(), + device=t.device, + dtype=t.dtype, + 
requires_grad=t.requires_grad, + ) + + # If someone runs this hop under the default compiler backend ("eager") + # Then this path will be run with the actual user inputs. We convert them + # to fake tensors in order to not perform any actual compute. + from torch._guards import detect_fake_mode + + fake_mode = detect_fake_mode(index_values) + if fake_mode is None: + fake_mode = FakeTensorMode(allow_non_fake_inputs=True) + + with fake_mode: + unwrapped_score_mod_indexes = pytree.tree_map(_from_fun, index_values) + unwrapped_other_buffers = pytree.tree_map(_from_fun, other_buffers) + + assert all(isinstance(t, FakeTensor) for t in unwrapped_score_mod_indexes) + assert all(isinstance(t, FakeTensor) for t in unwrapped_other_buffers) + + example_flat_out = pytree.tree_map( + _from_fun, + score_mod(*unwrapped_score_mod_indexes, *unwrapped_other_buffers), + ) + if not isinstance(example_flat_out, torch.Tensor): + raise RuntimeError( + "Expected output of score_mod to be a tensor." + f"Got type {type(example_flat_out)}." 
+ ) + example_grad = _from_fun(example_flat_out) + + def joint_f(score, b, h, m, n, example_grad, *other_buffers): + def fw_with_masks(*args): + fw_out = score_mod(*args) + out_requires_grad = fw_out.requires_grad + return ((fw_out,), (out_requires_grad,)) + + joint = create_joint(fw_with_masks, aot_config=dummy_aot_config) + args = [score, b, h, m, n] + list(other_buffers) + optional_grad = [example_grad] if example_grad.requires_grad else [] + _, grads = joint(args, optional_grad) + + return grads + + joint_graph = make_fx(joint_f)( + *unwrapped_score_mod_indexes, example_grad, *unwrapped_other_buffers + ) + return score_mod, joint_graph + + +class FlexAttentionAutogradOp(torch.autograd.Function): + @staticmethod + def forward( + ctx, query, key, value, fw_graph, joint_graph, *other_buffers + ) -> Tuple[torch.Tensor, torch.Tensor]: + any_buffer_requires_grad = any(buffer.requires_grad for buffer in other_buffers) + assert ( + not any_buffer_requires_grad + ), "Captured buffers that require grad are not yet supported." 
+ ctx._fw_graph = fw_graph + ctx._joint_graph = joint_graph + with torch._C._AutoDispatchBelowAutograd(): + out, logsumexp = flex_attention(query, key, value, fw_graph, *other_buffers) + + ctx.save_for_backward(query, key, value, out, logsumexp, *other_buffers) + return out, logsumexp + + @staticmethod + def backward(ctx, grad_out, logsumexp_grad): + fw_args = ctx.saved_tensors + query, key, value, out, logsumexp, *other_buffers = fw_args + fw_graph = ctx._fw_graph + joint_graph = ctx._joint_graph + # We have asserted that other_buffers do not require grad in the forward + none_grads = [None] * (2 + len(other_buffers)) + grad_query, grad_key, grad_value = flex_attention_backward( + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + return grad_query, grad_key, grad_value, *none_grads + + +@flex_attention.py_impl(DispatchKey.Autograd) +def flex_attention_autograd( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + score_mod: Callable, + *other_buffers: Tuple[torch.Tensor, ...], +) -> Tuple[torch.Tensor, torch.Tensor]: + with TransformGetItemToIndex(): + input_requires_grad = any(t.requires_grad for t in (query, key, value)) + if torch.is_grad_enabled() and input_requires_grad: + example_vals = [ + torch.zeros((), dtype=query.dtype, requires_grad=input_requires_grad) + ] + [torch.zeros((), dtype=torch.int) for _ in range(4)] + fw_graph, bw_graph = create_fw_bw_graph( + score_mod, example_vals, other_buffers + ) + else: + fw_graph, bw_graph = score_mod, None + out, logsumexp = FlexAttentionAutogradOp.apply( + query, key, value, fw_graph, bw_graph, *other_buffers + ) + return out, logsumexp + + +# ---------------------------- Backward HOP Implementation ---------------------------- + + +@flex_attention_backward.py_impl(DispatchKey.CompositeExplicitAutograd) +def sdpa_dense_backward( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + 
grad_out: torch.Tensor, + fw_graph: Callable, # GraphModule type hint? + joint_graph: Callable, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + working_precision = torch.float64 if query.dtype == torch.float64 else torch.float32 + scores = (query @ key.transpose(-2, -1)).to(working_precision) + + b = torch.arange(0, scores.size(0), device=scores.device) + h = torch.arange(0, scores.size(1), device=scores.device) + m = torch.arange(0, scores.size(2), device=scores.device) + n = torch.arange(0, scores.size(3), device=scores.device) + + in_dim_buffers = (None,) * len(other_buffers) + score_mod = torch.vmap(fw_graph, in_dims=(0, None, None, None, 0) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, None, None, 0, None) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers) + score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers) + + with TransformGetItemToIndex(): + post_mod_scores = score_mod(scores, b, h, m, n, *other_buffers).to( + working_precision + ) + + softmax_scores = torch.exp(post_mod_scores - logsumexp.unsqueeze(-1)) + + grad_value = softmax_scores.to(query.dtype).transpose(-2, -1) @ grad_out + + grad_softmax_scores = grad_out @ value.transpose(-2, -1) + + sum_scores = torch.sum(out * grad_out, -1, keepdim=True) + grad_score_mod = softmax_scores * (grad_softmax_scores - sum_scores) + + # Gradient of the inline score_mod function, with respect to the scores + in_dim_buffers = (None,) * len(other_buffers) + out_dims = [0, None, None, None, None] + [None] * len(other_buffers) + joint_score_mod = torch.vmap( + joint_graph, + in_dims=(0, None, None, None, 0, 0) + in_dim_buffers, + out_dims=out_dims, + ) + joint_score_mod = torch.vmap( + joint_score_mod, + in_dims=(0, None, None, 0, None, 0) + in_dim_buffers, + out_dims=out_dims, + ) + joint_score_mod = torch.vmap( + joint_score_mod, + in_dims=(0, None, 0, None, None, 0) + 
in_dim_buffers, + out_dims=out_dims, + ) + joint_score_mod = torch.vmap( + joint_score_mod, + in_dims=(0, 0, None, None, None, 0) + in_dim_buffers, + out_dims=out_dims, + ) + with TransformGetItemToIndex(): + grad_scores, *_ = joint_score_mod( + scores, b, h, m, n, grad_score_mod, *other_buffers + ) + grad_scores = grad_scores.to(query.dtype) + + grad_query = grad_scores @ key + grad_key = grad_scores.transpose(-2, -1) @ query + return grad_query.contiguous(), grad_key.contiguous(), grad_value.contiguous() + + +def trace_flex_attention_backward( + proxy_mode: ProxyTorchDispatchMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + grad_out: torch.Tensor, + fw_graph: Union[Callable, GraphModule], + joint_graph: GraphModule, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """We already have the forward graph and joint graph from the forward pass, so we create a proxy attach both graphs""" + example_out = flex_attention_backward( + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + + fw_example_vals = [ + torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad) + ] + [torch.zeros((), dtype=torch.int) for _ in range(4)] + bw_example_vals = fw_example_vals + [torch.zeros((), dtype=query.dtype)] + with TransformGetItemToIndex(): + fw_graph = reenter_make_fx(fw_graph)(*fw_example_vals, *other_buffers) + joint_graph = reenter_make_fx(joint_graph)(*bw_example_vals, *other_buffers) + proxy_mode.tracer.root.register_module("fw_graph", fw_graph) + proxy_mode.tracer.root.register_module("joint_graph", joint_graph) + node_args = ( + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", + flex_attention_backward, + 
proxy_args, + {}, + name="flex_attention_backward", + ) + return track_tensor_tree( + example_out, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + +@flex_attention_backward.py_impl(ProxyTorchDispatchMode) +def flex_attention_backward_proxy_torch_dispatch_mode( + mode: ProxyTorchDispatchMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + grad_out: torch.Tensor, + fw_graph: Union[Callable, GraphModule], + joint_graph: GraphModule, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + assert mode is not None, "Mode should always be enabled for python fallback key" + if mode.enable_tracing: + return trace_flex_attention_backward( + mode, + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + else: + return flex_attention_backward( + query, + key, + value, + out, + logsumexp, + grad_out, + fw_graph, + joint_graph, + *other_buffers, + ) + + +@flex_attention_backward.py_functionalize_impl +def flex_attention_backward_functionalize( + ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + grad_out: torch.Tensor, + fw_graph: Union[Callable, GraphModule], + joint_graph: GraphModule, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Defines the functionalization rules for the flex_attention operator. + + Write now we are unwrapping each tensor and then redispatching to the next, + since we know that the forward score mod function is assured to be free of mutations + to the other_buffers, we skip that mutate check and go straight to redispatching. 
+ """ + query_unwrapped = ctx.unwrap_tensors(query) + key_unwrapped = ctx.unwrap_tensors(key) + value_unwrapped = ctx.unwrap_tensors(value) + out_unwrapped = ctx.unwrap_tensors(out) + logsumexp_unwrapped = ctx.unwrap_tensors(logsumexp) + grad_out_unwrapped = ctx.unwrap_tensors(grad_out) + other_buffers_unwrapped = ctx.unwrap_tensors(other_buffers) + + # Appease the mypy overlords + assert isinstance(query_unwrapped, torch.Tensor) + assert isinstance(key_unwrapped, torch.Tensor) + assert isinstance(value_unwrapped, torch.Tensor) + assert isinstance(out_unwrapped, torch.Tensor) + assert isinstance(logsumexp_unwrapped, torch.Tensor) + assert isinstance(grad_out_unwrapped, torch.Tensor) + assert isinstance(other_buffers_unwrapped, tuple) + assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped) + + with ctx.redispatch_to_next() as m: + functional_fw_graph = ctx.functionalize(fw_graph) + functional_joint_graph = ctx.functionalize(joint_graph) + + grad_query, grad_key, grad_value = flex_attention_backward( + query_unwrapped, + key_unwrapped, + value_unwrapped, + out_unwrapped, + logsumexp_unwrapped, + grad_out_unwrapped, + functional_fw_graph, # type: ignore[arg-type] + functional_joint_graph, # type: ignore[arg-type] + *other_buffers_unwrapped, + ) + + return ctx.wrap_tensors((grad_query, grad_key, grad_value)) # type: ignore[return-value,arg-type] + + +@flex_attention_backward.py_impl(FakeTensorMode) +def flex_attention_backward_fake_tensor_mode( + mode: FakeTensorMode, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + out: torch.Tensor, + logsumexp: torch.Tensor, + grad_out: torch.Tensor, + fw_graph: Union[Callable, GraphModule], + joint_graph: GraphModule, + *other_buffers: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + with mode: + grad_query = torch.empty_like(query, memory_format=torch.contiguous_format) + grad_key = torch.empty_like(key, memory_format=torch.contiguous_format) + grad_value = 
torch.empty_like(value, memory_format=torch.contiguous_format) + return grad_query, grad_key, grad_value + + +flex_attention_backward.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(flex_attention_backward, deferred_error=True) +) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/map.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/map.py new file mode 100644 index 0000000000000000000000000000000000000000..f5bf1d43c19f56baf7a9f10a41fdece88a4003bf --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/map.py @@ -0,0 +1,351 @@ +# mypy: allow-untyped-defs +import torch +import torch.utils._pytree as pytree +from torch._C import DispatchKey +from torch._dispatch.python import suspend_functionalization +from torch._functorch.aot_autograd import AOTConfig, create_joint, from_fun + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + reenter_make_fx, + UnsupportedAliasMutationException, +) +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch._subclasses.functional_tensor import ( + disable_functional_mode, + FunctionalTensor, +) +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + make_fx, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.multiprocessing.reductions import StorageWeakRef + + +# TODO: We add this to prevent dymamo from tracing into map_wrapper, +# remove the wrapper call when it's ready. 
class MapWrapper(HigherOrderOperator):
    """Thin wrapper op: calls are forwarded to the plain-Python map_wrapper."""

    def __call__(self, xs, *args):
        return map_wrapper(xs, *args)


# NOTE: deliberately shadows the `map` builtin at module scope — this name is
# the module's public higher-order operator.
map = MapWrapper("map")
map_impl = HigherOrderOperator("map_impl")

# Minimal AOTConfig handed to create_joint when building the joint (backward)
# graph below; the compiler/partition fields are None placeholders.
dummy_aot_config = AOTConfig(
    fw_compiler=None,  # type: ignore[arg-type]
    bw_compiler=None,  # type: ignore[arg-type]
    partition_fn=None,  # type: ignore[arg-type]
    decompositions={},
    num_params_buffers=0,
    aot_id=0,
    keep_inference_input_mutations=False,
)


def create_fw_bw_graph(f, num_mapped_args, *args):
    mapped_xs = args[:num_mapped_args]
    pos_args = args[num_mapped_args:]

    # Note:[HOP create fw_bw graph] We create "clean" environments for make_fx by suspending all dispatch keys
    # between Autograd and Python key. Currently, we only suspend functionalization but more can be
    # added when required. Will encounter two problems if we don't suspend functionalization:
    #
    # 1. make_fx fails to capture operations on input: the inputs are wrapped as _to_functional_tensor_wrapper,
    # but they will be unwrapped before entering ProxyTorchDispatchMode as part of the dispatching.
    # However, it's the outside wrapper that tracer creates proxies for. This causes the tracer to fail to
    # fetch the proxy for the inputs and fail to capture any operations on them.
    #
    # 2. make_fx fails to capture output: the outputs after ProxyTorchDispatchMode are further
    # wrapped as FunctionalTensorWrapper in Functionalize key after return. However, the tracer
    # only associates the inner tensor with proxy in ProxyTorchDispatchMode. Therefore,
    # when creating the output node, it fails to associate the wrapped tensor with its proxy.
    # Instead, it will create _tensor_constant as output.
+ + with suspend_functionalization(), disable_functional_mode(): + with disable_proxy_modes_tracing(): + + def _from_fun(t): + if isinstance(t, torch.Tensor): + if t.dtype != torch.bool: + return torch.empty_strided( + t.size(), + t.stride(), + dtype=t.dtype, + requires_grad=t.requires_grad, + ) + else: + # clone of a functional tensor produces a functional tensor + # but we want to avoid it so we clone a non-functional version + maybe_unfunc_t = t + if isinstance(t, FunctionalTensor): + torch._sync(t) + maybe_unfunc_t = from_fun(t) + elif torch._is_functional_tensor(t): + # need to handle both types of functionalization here: + # these are the tensors that came from the user, + # which could be either FunctionalTensorWrapper or FunctionalTensor + torch._sync(t) + maybe_unfunc_t = torch._from_functional_tensor(t) + return maybe_unfunc_t.clone() + return t + + unwrapped_mapped_xs = pytree.tree_map(_from_fun, mapped_xs) + example_xs = _unstack_pytree(unwrapped_mapped_xs)[0] + + example_pos_args = [ + _from_fun(arg) if isinstance(arg, torch.Tensor) else arg + for arg in pos_args + ] + example_flat_out = pytree.tree_map( + _from_fun, f(*example_xs, *example_pos_args) + ) + if any( + not isinstance(out, torch.Tensor) + for out in example_flat_out + if out is not None + ): + raise RuntimeError( + "Expect outputs of map only contains tensors or None. " + f"Got types {[type(out) for out in example_flat_out]}." 
+ ) + example_grad = [_from_fun(out) for out in example_flat_out] + + fw_graph = make_fx(f)(*example_xs, *example_pos_args) + + def joint_f(*example_args): + joint_mapped_args = example_args[:joint_num_mapped] + args = example_args[joint_num_mapped:] + + mapped_input = joint_mapped_args[:num_mapped_args] + mapped_grads = joint_mapped_args[num_mapped_args:] + + def fw_with_masks(*args): + fw_out = f(*args) + return fw_out, [ + True + if isinstance(ret, torch.Tensor) and ret.requires_grad + else False + for ret in fw_out + ] + + joint = create_joint(fw_with_masks, aot_config=dummy_aot_config) + _, grads = joint( + list(mapped_input) + list(args), + [ + grad + for grad in mapped_grads + if grad is not None and grad.requires_grad + ], + ) + + # In order to keep map functional for backward graph, + # we clone outputs that are aliasing inputs + input_storage = { + StorageWeakRef(arg._typed_storage()) + for arg in example_args + if isinstance(arg, torch.Tensor) + } + + def maybe_clone(t): + if ( + isinstance(t, torch.Tensor) + and StorageWeakRef(t._typed_storage()) in input_storage + ): + return t.clone() + return t + + return pytree.tree_map(maybe_clone, grads) + + joint_num_mapped = len(example_grad) + len(example_xs) + joint_graph = make_fx(joint_f)(*example_xs, *example_grad, *example_pos_args) + return fw_graph, joint_graph + + +def map_wrapper(f, xs, *args): + flat_xs, xs_spec = pytree.tree_flatten(xs) + if not all(isinstance(t, torch.Tensor) for t in flat_xs): + raise RuntimeError(f"Mapped xs can only consist of tensors. Got xs {flat_xs}.") + + num_mapped_args = len(flat_xs) + shapes = [xs.shape for xs in flat_xs] + leading_dim_size = shapes[0][0] + if leading_dim_size == 0: + raise RuntimeError("Leading dimensions of mapped xs cannot be 0.") + + if any(cur_shape[0] != leading_dim_size for cur_shape in shapes): + raise RuntimeError( + f"Leading dimensions of mapped xs must be consistent. Got shapes {shapes}." 
+ ) + + out_spec = None + + def flat_fn(*flat_args): + xs = pytree.tree_unflatten(list(flat_args[:num_mapped_args]), xs_spec) + unflattened_out = f(xs, *flat_args[num_mapped_args:]) + flat_out, tmp_out_spec = pytree.tree_flatten(unflattened_out) + + nonlocal out_spec + out_spec = tmp_out_spec + return flat_out + + return pytree.tree_unflatten( + map_impl(flat_fn, flat_xs, args), out_spec # type: ignore[arg-type] + ) + + +class MapAutogradOp(torch.autograd.Function): + @staticmethod + def forward(ctx, fw_graph, joint_graph, num_mapped_args, *flat_args): + ctx.save_for_backward(*flat_args) + ctx._joint_graph = joint_graph + ctx._num_mapped_args = num_mapped_args + with torch._C._AutoDispatchBelowAutograd(): + return ( + *map_impl( + fw_graph, flat_args[:num_mapped_args], flat_args[num_mapped_args:] + ), + ) + + @staticmethod + def backward(ctx, *flat_grads): + fw_args = ctx.saved_tensors + fw_mapped_args = fw_args[: ctx._num_mapped_args] + pos_args = fw_args[ctx._num_mapped_args :] + + grads = map_impl( + ctx._joint_graph, + fw_mapped_args + flat_grads, + pos_args, + ) + return None, None, None, *grads + + +def trace_map(proxy_mode, func_overload, f, xs, pos_args): + leading_dim_size = xs[0].shape[0] + + example_input = _unstack_pytree(xs)[0] + body_graph = f + + body_graph = reenter_make_fx(body_graph)(*example_input, *pos_args) + + next_name = proxy_mode.tracer.get_fresh_qualname("body_graph_") + + proxy_mode.tracer.root.register_module(next_name, body_graph) + + with disable_proxy_modes_tracing(): + example_outs = body_graph(*example_input, *pos_args) + + def expand_tensor(t): + if isinstance(t, torch.Tensor): + return t.expand(leading_dim_size, *t.shape) + return t + + expanded_outs = pytree.tree_map(expand_tensor, example_outs) + + node_args = (body_graph, list(xs), list(pos_args)) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, 
name="map_impl" + ) + return track_tensor_tree( + expanded_outs, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + +def _unstack_pytree(xs): + flat_xs, inspec = pytree.tree_flatten(xs) + if not all(isinstance(xs, torch.Tensor) for xs in flat_xs): + raise RuntimeError(f"Leaves of xs must be Tensor {flat_xs}") + + if not all(xs.shape[0] == flat_xs[0].shape[0] for xs in flat_xs): + raise RuntimeError( + f"Leaves of xs must have same leading dimension size {[xs.shape for xs in flat_xs]}" + ) + + a = zip(*flat_xs) + + pytrees = [] + for tuple in a: + pytrees.append(pytree.tree_unflatten(tuple, inspec)) + return pytrees + + +def _stack_pytree(pytrees): + flat_out = [] + out_spec = None + for pt in pytrees: + flat_pt, out_spec = pytree.tree_flatten(pt) + flat_out.append(flat_pt) + assert out_spec is not None + b = zip(*flat_out) + stacked_out = [] + for leaves in b: + if all(isinstance(leaf, torch.Tensor) for leaf in leaves): + stacked_out.append(torch.stack(leaves)) + elif all(leaf is None for leaf in leaves): + # Backward graph can return None output when forward inputs doesn't require grad. + # When we eagerly execute backward graph, we need to call _stack_pytree on its output, + # therefore we need to deal with None output. 
+ stacked_out.append(None) # type: ignore[arg-type] + else: + raise RuntimeError(f"Cannot stack {leaves}.") + return pytree.tree_unflatten(stacked_out, out_spec) + + +@map_impl.py_impl(DispatchKey.CompositeExplicitAutograd) +def map_dense(f, xs, pos_args): + pytrees = [] + for inp in _unstack_pytree(xs): + pytrees.append(f(*inp, *pos_args)) + return _stack_pytree(pytrees) + + +@map_impl.py_impl(DispatchKey.Autograd) +def map_autograd(f, xs, pos_args): + num_mapped_args = len(xs) + fw_graph, bw_graph = create_fw_bw_graph(f, num_mapped_args, *xs, *pos_args) + flat_out = MapAutogradOp.apply(fw_graph, bw_graph, num_mapped_args, *xs, *pos_args) + return flat_out + + +@map_impl.py_impl(ProxyTorchDispatchMode) +def map_proxy_torch_dispatch_mode(mode, f, xs, args): + if mode.enable_tracing: + return trace_map(mode, map_impl, f, xs, args) + else: + return map_impl(f, xs, args) + + +@map_impl.py_impl(FakeTensorMode) +def map_fake_tensor_mode(mode, f, xs, args): + with mode: + return map_dense(f, xs, args) + + +@map_impl.py_functionalize_impl +def map_functionalize(ctx, f, xs, pos_args): + unwrapped_xs = ctx.unwrap_tensors(xs) + unwrapped_args = ctx.unwrap_tensors(pos_args) + wrapped_fn = ctx.functionalize(f) + + with ctx.redispatch_to_next(): + with disable_proxy_modes_tracing(): + example_inputs = (*_unstack_pytree(unwrapped_xs)[0], *unwrapped_args) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + if _has_potential_branch_input_mutation( + f, example_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException("torch.map is mutating the input!") + + if _has_potential_branch_input_alias( + f, example_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException("torch.map is aliasing the input!") + + map_return = map_impl(wrapped_fn, unwrapped_xs, unwrapped_args) + return ctx.wrap_tensors(map_return) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py 
b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f5e2115aeec7edd946acbf9938a52642c0b61b --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py @@ -0,0 +1,171 @@ +# mypy: allow-untyped-defs + +import torch +import torch.utils._pytree as pytree +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, + maybe_handle_decomp, +) +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch._prims_common import elementwise_dtypes, ELEMENTWISE_TYPE_PROMOTION_KIND +from torch._higher_order_ops.utils import autograd_not_implemented + +# TODO to figure out a more generic approach +ALLOWABLE_OPS = [ + torch.ops.aten.linear.default, + torch.ops.aten.mm.default, + torch.ops.aten.conv2d.default, + torch.ops.aten.convolution.default, + torch.ops.aten.mul.Tensor, + torch.ops.aten.mul.Scalar, + torch.ops.aten.div.Tensor, + torch.ops.aten.div.Scalar, +] + + +class OutDtypeOperator(HigherOrderOperator): + """ + The out_dtype operator takes an existing ATen functional operator, an + `out_dtype` argument, and arguments to the original operator, and executes + the original operator and returns a Tensor with the `out_dtype` precision. + This operator does not mandate a compute precision so it allows the + representation to not be opinionated about the exact implementation. + + The general implementation for all operators will be the following: + 1. Promote inputs dtypes based on default PyTorch dtype promotion rules, + using the dtypes of all input Tensors/Scalars and the `out_dtype` + arugument. + 2. Execute the operator + 3. 
Cast the output to `out_dtype` + """ + + + def __init__(self): + super().__init__("out_dtype") + # TODO(ydwu4): Subclassing HigherOrderOperator causes __module__ to + # become different (torch._higher_order_ops.out_dtype) which will result + # in torch.fx to record the op incorrectly in the graph. + self.__module__ = "torch.ops.higher_order" + + def __call__(self, op, output_dtype, *args): + if not isinstance(op, torch._ops.OpOverload): + raise ValueError("out_dtype's first argument must be an OpOverload") + if op._schema.is_mutable: + raise ValueError("out_dtype's first argument needs to be a functional operator") + if not ( + len(op._schema.returns) == 1 and + isinstance(op._schema.returns[0].type, torch.TensorType) + ): + raise ValueError( + "out_dtype's can only apply to ops that return a single tensor" + f"Instead got {[r.type for r in op._schema.returns]}" + ) + + if op not in ALLOWABLE_OPS: + raise ValueError( + f"out_dtype only allows the following operators: {ALLOWABLE_OPS}." + ) + + res = super().__call__(op, output_dtype, *args) + + return res + + +out_dtype = OutDtypeOperator() + +def trace_out_dtype(proxy_mode, func_overload, op, output_dtype, *args): + # NB: Long-term we should put the decomposition logic into + # ProxyTorchDispatchMode so that people do not need to call maybe_handle_decomp + # in all HigherOrderOp proxy implementations. + r = maybe_handle_decomp(proxy_mode, func_overload, (op, output_dtype, *args), {}) + if r is not NotImplemented: + return r + + with disable_proxy_modes_tracing(): + # This is a simplified implementation of this operator just for tracing. 
+ # Actual implementation may also first promote the arguments + out = op(*args).to(dtype=output_dtype) + + node_args = (op, output_dtype, *args) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, name="out_dtype" + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@out_dtype.py_impl(DispatchKey.CompositeExplicitAutograd) +def out_dtype_dense( + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + if is_int_mm(op, output_dtype, args): + return torch._int_mm(*args) + return out_dtype_fallback(op, output_dtype, *args) + + +def is_int_mm(op, output_dtype, args): + return ( + op == torch.ops.aten.mm.default and + output_dtype == torch.int32 and + len(args) == 2 and + args[0].dtype == torch.int8 and + args[1].dtype == torch.int8 and + args[0].is_cuda and + args[1].is_cuda + ) + + +def out_dtype_fallback(op, output_dtype, *args): + flat_inputs = pytree.arg_tree_leaves(*args) + [torch.ones(1, dtype=output_dtype)] + promote_dtype: torch.dtype = elementwise_dtypes( + *flat_inputs, + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + )[0] + + casted_args = pytree.tree_map_only( + torch.Tensor, lambda arg: arg.to(dtype=promote_dtype), args + ) + res = op(*casted_args).to(dtype=output_dtype) + return res + + +out_dtype.py_impl(DispatchKey.Autograd)(autograd_not_implemented(out_dtype, deferred_error=True)) + + +@out_dtype.py_impl(ProxyTorchDispatchMode) +def out_dtype_proxy( + mode: ProxyTorchDispatchMode, + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + if mode.enable_tracing: + return trace_out_dtype(mode, out_dtype, op, output_dtype, *args) + else: + return out_dtype(op, output_dtype, *args) + + +@out_dtype.py_impl(FakeTensorMode) +def out_dtype_fake_tensor_mode( + mode: FakeTensorMode, + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + with 
mode: + return out_dtype_dense(op, output_dtype, *args) + + +@out_dtype.py_functionalize_impl +def out_dtype_func(ctx, op, output_dtype, *args): + unwrapped_args = tuple(ctx.unwrap_tensors(arg) for arg in args) + + with ctx.redispatch_to_next(): + res = out_dtype(op, output_dtype, *unwrapped_args) + return ctx.wrap_tensors(res) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..d781248a19c9fdc8857d4ed424df27d90208f3cd --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py @@ -0,0 +1,92 @@ +# mypy: allow-untyped-defs +import torch +import torch._subclasses.functional_tensor + +import torch.utils._pytree as pytree + +from torch._C import DispatchKey +from torch._functorch.utils import exposed_in + +from torch._higher_order_ops.utils import _set_compilation_env, autograd_not_implemented +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + make_fx, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.utils._python_dispatch import _get_current_dispatch_mode + + +@exposed_in("torch") +def strict_mode(callable, operands): + if torch.compiler.is_dynamo_compiling(): + return strict_mode_op(callable, operands) + + with _set_compilation_env(): + with torch._dynamo.utils.disable_cache_limit(): + return torch.compile(strict_mode_op, backend="eager", fullgraph=True)( + callable, operands + ) + + +strict_mode_op = HigherOrderOperator("strict_mode") + + +@strict_mode_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def strict_mode_op_dense(callable, operands): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + return callable(*operands) + + 
+strict_mode_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(strict_mode_op, deferred_error=True) +) + + +@strict_mode_op.py_impl(ProxyTorchDispatchMode) +def inner(mode, callable, operands): + if mode.enable_tracing: + return trace_strict_mode(mode, strict_mode_op, callable, operands) + else: + return strict_mode_op(callable, operands) + + +def trace_strict_mode(mode, strict_mode_op, callable, operands): + pre_dispatch = getattr(mode, "pre_dispatch", False) + + with disable_proxy_modes_tracing(): + graph = make_fx(callable, pre_dispatch=pre_dispatch)(*operands) + + graph_name = mode.tracer.get_fresh_qualname("strict_graph_") + mode.tracer.root.register_module(graph_name, graph) + + args = (graph, operands) + + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + + out_proxy = mode.tracer.create_proxy( + "call_function", strict_mode_op, proxy_args, {}, name="strict_mode" + ) + + out = graph(*operands) + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + + +@strict_mode_op.py_impl(FakeTensorMode) +def strict_mode_fake_tensor_mode(mode, callable, operands): + with mode: + true_outs = callable(*operands) + return true_outs + + +@strict_mode_op.py_functionalize_impl +def strict_mode_func(ctx, callable, inputs): + unwrapped_inputs = ctx.unwrap_tensors(inputs) + with ctx.redispatch_to_next(): + functional_callable = ctx.functionalize(callable) + + cond_return = strict_mode_op(functional_callable, unwrapped_inputs) + return ctx.wrap_tensors(cond_return) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py new file mode 100644 index 0000000000000000000000000000000000000000..744e559e65d0719ed4cd564194ee6e91c99091e7 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py @@ -0,0 +1,119 @@ +# mypy: allow-untyped-defs +import logging +from contextlib import contextmanager + +import 
torch +from torch._C import DispatchKey # @manual +from torch._functorch._aot_autograd.utils import KNOWN_TYPES +from torch._higher_order_ops.utils import autograd_not_implemented +from torch._library.fake_class_registry import _ns_and_class_name, FakeScriptObject +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.fx.node import has_side_effect +from torch.utils import _pytree as pytree + +log = logging.getLogger(__name__) + +# The call_torchbind operator represents a method invocation on a torchbind +# object. The calling convention is: +# call_torchbind(self: ScriptObject, method_name: str, *method_args, **method_kwargs) +# We do not expect users to write this operator directly. Instead it will be +# emitted by Dynamo when tracing encounters a torchbind object. +call_torchbind = HigherOrderOperator("call_torchbind") + +# Register this operator as side-effectful with FX. +# TODO: this is not really sufficient. While passes (hopefully) check +# Node.is_impure() and make good decisions, we also assume we can execute the +# graph as many times as we want without changing behavior, which is NOT true of +# ops that mutate torchbind object state. +has_side_effect(call_torchbind) + +_orig_scriptmethod_call = torch.ScriptMethod.__call__ + + +def torchbind_method_redispatch(self, *args, **kwargs): + if isinstance(self.raw_owner, torch.ScriptObject): + return call_torchbind(self.raw_owner, self.name, *args, **kwargs) + return _orig_scriptmethod_call(self, *args, **kwargs) + + +@contextmanager +def enable_torchbind_tracing(): + """Context manager that acts as a feature flag to enable torchbind tracing + behavior. Once torchbind tracing has been stabilized, we can remove this and + turn it always on. 
+ """ + try: + KNOWN_TYPES.append(torch.ScriptObject) + torch.ScriptMethod.__call__ = torchbind_method_redispatch # type: ignore[method-assign] + yield + finally: + assert ( + KNOWN_TYPES.pop() is torch.ScriptObject + ), "Someone else messed with KNOWN_TYPES during tracing, exploding." + torch.ScriptMethod.__call__ = _orig_scriptmethod_call # type: ignore[method-assign] + + +@call_torchbind.py_impl(DispatchKey.CompositeExplicitAutograd) +def call_torchbind_impl(obj, method, *args, **kwargs): + if isinstance(obj, torch.ScriptObject): + return _orig_scriptmethod_call(getattr(obj, method), *args, **kwargs) + elif isinstance(obj, FakeScriptObject): + return getattr(obj.wrapped_obj, method)(*args, **kwargs) + else: + raise RuntimeError(f"Unsupported first arg type {type(obj)} for call_torchbind") + + +@call_torchbind.py_impl(ProxyTorchDispatchMode) +def inner(mode, *args, **kwargs): + if mode.enable_tracing: + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + + out_proxy = mode.tracer.create_proxy( + "call_function", + call_torchbind, + proxy_args, + proxy_kwargs, + ) + out = call_torchbind(*args, **kwargs) + + obj, method, *rest_args = args + if isinstance(obj, torch.ScriptObject): + ns, class_name = _ns_and_class_name( + obj._type().qualified_name() # type: ignore[attr-defined] + ) + log.warning( + "Tracing torchbind method %s.%s with real ScriptObject. This may" + " cause the original object being mutated. If this is not intended," + ' You can register a fake class with torch._library.register_fake_class("%s::%s").', + class_name, + method, + ns, + class_name, + ) + + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + else: + return call_torchbind(*args, **kwargs) + + +# TODO: currently we just run the C++ implementation with fake tensors. +# But we should make it possible to register a fake torchbind implementation. 
+@call_torchbind.py_impl(FakeTensorMode) +def call_torchbind_fake(mode, *args, **kwargs): + with mode: + return call_torchbind_impl(*args, **kwargs) + + +call_torchbind.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(call_torchbind, deferred_error=True) +) + + +@call_torchbind.py_functionalize_impl +def call_torchbind_func(ctx, *args, **kwargs): + args = ctx.unwrap_tensors(args) + with ctx.redispatch_to_next(): + return ctx.wrap_tensors(call_torchbind(*args, **kwargs)) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py new file mode 100644 index 0000000000000000000000000000000000000000..5552ef1ff8b2e6ea49592e841cf0624a06475c13 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py @@ -0,0 +1,737 @@ +# mypy: allow-untyped-defs +import dataclasses +import inspect +import logging +import threading +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union + +import torch.utils._pytree as pytree +from torch import Tensor +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._prims_common import clone_preserve_strides +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + +log = logging.getLogger("torch._dynamo") + + +############################################################################### +# Kernel Side Table + + +# We cannot put Triton Kernels into the FX graph as the graph nodes +# do not support arbitrary functions. +# Use a side table. 
# We use two dicts so that fetching both the kernel and id are O(1)
class KernelSideTable:
    """Process-wide registry mapping Triton kernels (and constant args) to
    integer ids so they can be referenced from FX graph nodes."""

    id_to_kernel: Dict[int, Any] = {}
    kernel_to_id: Dict[Any, int] = {}
    constant_args: Dict[int, Any] = {}
    lock = threading.Lock()

    def add_kernel(self, kernel) -> int:
        """Insert `kernel` (idempotently) and return its table index."""
        with self.lock:
            existing = self.kernel_to_id.get(kernel)
            if existing is not None:
                return existing

            idx = len(self.id_to_kernel)
            self.id_to_kernel[idx] = kernel
            self.kernel_to_id[kernel] = idx
            return idx

    def get_kernel(self, idx: int):
        """Return the triton kernel stored at `idx`."""
        # No lock needed: a single dict read is atomic.
        assert idx in self.id_to_kernel
        return self.id_to_kernel[idx]

    def add_constant_args(self, args) -> int:
        """Store constant args that cannot be added to the graph; return their index."""
        with self.lock:
            idx = len(self.constant_args)
            self.constant_args[idx] = args
            return idx

    def get_constant_args(self, idx: int):
        """Return the constant args stored at `idx`."""
        # No lock needed: a single dict read is atomic.
        assert idx in self.constant_args
        return self.constant_args[idx]

    def reset_table(self) -> None:
        """Clear the table. Only meant for unit tests; assumes single-threaded use."""
        self.id_to_kernel = {}
        self.kernel_to_id = {}
        self.constant_args = {}


kernel_side_table = KernelSideTable()


###############################################################################
# Mutation Tracker


@dataclasses.dataclass(frozen=True)
class Param:
    """Reference to a kernel parameter by positional index."""

    idx: int


@dataclasses.dataclass(frozen=True)
class Intermediate:
    """Reference to an op result; a negative index marks a fake intermediate."""

    idx: int

    def fake(self):
        return self.idx < 0


@dataclasses.dataclass(frozen=True)
class Op:
    name: str
    fn_call_name: Optional[str]
    args: List[Union[Param, Intermediate]]
    ret: Intermediate = dataclasses.field(repr=False)

    def __post_init__(self):
        if self.name == "tt.call":
            assert self.fn_call_name
is not None + else: + assert self.fn_call_name is None + + +def generate_ttir(kernel, kwargs): + """ + Uses Triton's internal code generation to create TTIR + """ + import sympy + import triton + from triton.compiler.compiler import ASTSource + from triton.runtime.autotuner import Autotuner + from triton.runtime.jit import JITFunction + + import torch + import torch._inductor.ir + from torch._subclasses.fake_tensor import FakeTensor + + if isinstance(kernel, Autotuner): + if len(kernel.configs) > 0: + # If we are autotuning, then it doesn't matter which version gets + # picked for tracing purposes, so lets pick the first one + kwargs = {**kwargs, **kernel.configs[0].kwargs} + kernel = kernel.fn + + assert isinstance(kernel, JITFunction) + + if len(kwargs) != len(kernel.arg_names): + raise ValueError("Incorrect number of arguments passed to kernel") + + # Replace all SymExprs with a regular value for TTIR generation + # Replace all FakeTensor/TensorBox with real tensors + # These replacements are needed for triton's type, key and config functions + ordered_args: Dict[str, Any] = {} + for name in kernel.arg_names: + a = kwargs[name] + if isinstance(a, (torch.SymInt, torch.SymFloat, torch.SymBool, sympy.Expr)): + ordered_args[name] = 2 + elif isinstance(a, (FakeTensor, torch._inductor.ir.TensorBox)): + with torch._C._DisableTorchDispatch(): + ordered_args[name] = torch.empty(2, dtype=a.dtype) + else: + ordered_args[name] = a + + ordered_tensor_names = [ + name for name, arg in ordered_args.items() if isinstance(arg, Tensor) + ] + specialization = kernel._get_config(*ordered_args.values()) + constants = { + i: arg + for i, arg in enumerate(ordered_args.values()) + if not isinstance(arg, Tensor) + } + + # Build kernel signature -- doesn't include constexpr arguments. 
+ signature = { + i: kernel._type_of(kernel._key_of(arg)) + for i, arg in enumerate(ordered_args.values()) + if i not in kernel.constexprs + } + + context = triton._C.libtriton.ir.context() + target = triton.runtime.driver.active.get_current_target() + backend = triton.compiler.compiler.make_backend(target) + options = backend.parse_options(dict()) + triton._C.libtriton.ir.load_dialects(context) + backend.load_dialects(context) + + src = ASTSource(kernel, signature, constants, specialization) + + # Triton changes ASTSource.make_ir to take 3 arguments. Handle + # backward compatibility here. + if len(inspect.signature(src.make_ir).parameters) == 2: + ttir_module = src.make_ir(options, context) + else: + codegen_fns = backend.get_codegen_implementation() + ttir_module = src.make_ir(options, codegen_fns, context) + if not ttir_module.verify(): + raise RuntimeError("Verification for TTIR module has failed") + + return ttir_module, ordered_tensor_names + + +def ttir_to_functions(ttir_module) -> Dict[str, Dict[Intermediate, List[Op]]]: + """ + Walk the `ttir_module` bottom up to mine the `functions` from + the structured MLIR entities representing the Triton kernel + (mlir::Operation, mlir::Block, mlir::Region). 
+ """ + functions: Dict[str, Dict[Intermediate, List[Op]]] = {} + + # block id --> op result (Intermediate) --> one or more ops + op_stack: Dict[int, Dict[Intermediate, List[Op]]] = defaultdict( + lambda: defaultdict(list) + ) + region_id_to_block_ids: Dict[int, List[int]] = defaultdict(list) + block_id_to_block_arg_ids: Dict[int, List[int]] = {} + replacements: Dict[int, Union[Intermediate, Param]] = {} + reindex_map: Dict[int, int] = {} + next_fake_intermediate = 0 + + def reindex(idx): + if idx not in reindex_map: + reindex_map[idx] = len(reindex_map) + return reindex_map[idx] + + def mlir_to_functions(op) -> None: + name: str = op.get_name() + if name == "builtin.module": + # this wraps all tt.func ops + return + + operand_ids: List[int] = [ + reindex(op.get_operand(i).id()) for i in range(op.get_num_operands()) + ] + result_ids: List[int] = [ + reindex(op.get_result(i).id()) for i in range(op.get_num_results()) + ] + + child_block_ids: List[int] = [] + for i in [op.get_region(i).id() for i in range(op.get_num_regions())]: + # as the walk is bottom-up, the region_id_to_block_ids[i] + # must be populated by the time we process the enclosing op + child_block_ids.extend(region_id_to_block_ids[i]) + + parent_block_id = -1 + parent_block = op.get_block() + if parent_block is not None: + parent_block_id = parent_block.id() + if parent_block_id not in block_id_to_block_arg_ids: + block_id_to_block_arg_ids[parent_block_id] = [] + for i in range(parent_block.get_num_arguments()): + block_id_to_block_arg_ids[parent_block_id].append( + reindex(parent_block.get_argument(i).id()), + ) + # the region info is collected via ops' parent blocks to be + # used later when the region's encloding op is traversed + parent_region = parent_block.get_parent() + if parent_region is not None: + region_id_to_block_ids[parent_region.id()].append(parent_block_id) + + nonlocal next_fake_intermediate + + if name == "tt.func": + # for function ops: gather and inline + # the ops from all child 
blocks + fn_ops = defaultdict(list) + for child_block_id in child_block_ids: + for result, block_fn_ops in op_stack.pop(child_block_id).items(): + for block_fn_op in block_fn_ops: + fn_ops[result].append(block_fn_op) + + # replace the corresponding Intermediates in the + # child op args with the function args (Params) + for i, idx in enumerate(block_id_to_block_arg_ids[child_block_ids[0]]): + replacements[idx] = Param(i) + + for fn_op_list in fn_ops.values(): + for fn_op in fn_op_list: + for i in range(len(fn_op.args)): + arg = fn_op.args[i] + seen = set() # to break cycles + # there can be transitive replacements, but likely + # no cycles (we keep the `seen` set just in case) + while ( + isinstance(arg, Intermediate) + and arg.idx in replacements + and arg.idx not in seen + ): + seen.add(arg.idx) + arg = fn_op.args[i] = replacements[arg.idx] + + # next function capture starts + # with empty replacements + replacements.clear() + + fn_name = op.get_str_attr("sym_name") + functions[fn_name] = fn_ops + elif child_block_ids: + if name in {"scf.if", "scf.for", "scf.while", "tt.reduce", "tt.scan"}: + # for blocked ops: inline the enclosed ops into + # the parent block + rewire the last op in each + # child block to return the block result + return_ops = [] + for block_id in child_block_ids: + if name == "scf.for": + # example: + # %result = scf.for %iv = %lb to %ub step %step iter_args(%arg = %init) -> (i32) ... + # block args: 2 (%iv, %arg) + # op operands: 4 (%lb, %ub, %step, %init) + # `%arg` is mapping to `%init` + for i, idx in enumerate(block_id_to_block_arg_ids[block_id]): + if i == 0: + next_fake_intermediate -= 1 + replacements[idx] = Intermediate(next_fake_intermediate) + else: + replacements[idx] = Intermediate(operand_ids[i + 2]) + elif name == "scf.while": + # example: + # %3:3 = scf.while (%arg2 = %1, %arg3 = %2, %arg4 = %c0_i32_8) ... 
+ # block args: 3 (%arg2, %arg3, %arg4) + # op operands: 3 (%1, %2, %c0_i32_8) + # `%arg2` is mapping to `%1`, `%arg3` is mapping to `%2`, ... + for i, idx in enumerate(block_id_to_block_arg_ids[block_id]): + replacements[idx] = Intermediate(operand_ids[i]) + elif name == "scf.if": + # the scf block args are ignored by the pass. but, as they + # may be used as operands of the ops inside the block + # (and nested blocks inlined in the current block by now), + # they are replaced by new fake Intermediates to avoid "this + # operand is not returned by any other op in the fn" error + # in the downstream analysis + for idx in block_id_to_block_arg_ids[block_id]: + next_fake_intermediate -= 1 + replacements[idx] = Intermediate(next_fake_intermediate) + else: + assert name in ("tt.reduce", "tt.scan") + # wire the block arguments to the op arguments + num_operands = len(operand_ids) + block_arg_ids = block_id_to_block_arg_ids[block_id] + assert len(block_arg_ids) == 2 * num_operands, ( + f"{name} is expected to have twice as " + "many block arguments as op arguments: " + f"{operand_ids=}, {block_arg_ids=}." 
+ ) + for i, idx in enumerate(block_arg_ids): + # for a tt.reduce/tt.scan op with N arguments, the block + # arguments comprise N reduced values followed by + # N current values corresponding to the N op args + replacements[idx] = Intermediate( + operand_ids[i % num_operands] + ) + + if block_id in op_stack: + block_ops = op_stack.pop(block_id) + if not block_ops: + continue + last_ret, last_ops = block_ops.popitem() + if all( + op.name + in ("scf.yield", "tt.reduce.return", "tt.scan.return") + for op in last_ops + ): + # if last_ops are all return ops, treat them separately + return_ops.extend(last_ops) + else: + # otherwise, return last_ops to the block + block_ops[last_ret] = last_ops + for op_result, child_ops in block_ops.items(): + op_stack[parent_block_id][op_result].extend(child_ops) + + scf_results = [Intermediate(idx) for idx in result_ids] + for scf_result in scf_results: + for return_op in return_ops: + op_stack[parent_block_id][scf_result].append(return_op) + else: + raise RuntimeError( + f"Unknown blocked function: {name}. Can't capture the TTIR." 
+ ) + else: + callee = None + if name == "tt.call": + callee = op.get_flat_symbol_ref_attr("callee") + args: List[Union[Param, Intermediate]] = [ + Intermediate(operand) for operand in operand_ids + ] + block_ops = op_stack[parent_block_id] + if result_ids: + for result_id in result_ids: + res = Intermediate(result_id) + block_ops[res].append(Op(name, callee, args, res)) + else: + next_fake_intermediate -= 1 + fake_res = Intermediate(next_fake_intermediate) + block_ops[fake_res].append(Op(name, callee, args, fake_res)) + + ttir_module.walk(mlir_to_functions) + + return functions + + +class MemoizeWithCycleCheck: + def __init__(self, fn): + self.fn = fn + self.reset() + + def __call__(self, functions, fn_name, num_args): + key = (fn_name, num_args) + if key not in self.cache: + self.cache[key] = None + self.cache[key] = self.fn(functions, fn_name, num_args) + if self.cache[key] is None: + raise RuntimeError("Recursion is not supported") + return self.cache[key] + + def reset(self): + self.cache = {} + + +@MemoizeWithCycleCheck +def analyze_kernel_mutations(functions, fn_name, num_args): + """ + Analyzes the graph to detect all sinks from a predefined list of sinks + by using triton's MemWrite trait list. NOTE: What if triton exposed this? + From each sink, it traverses the CFG backwards to identify all the input + pointers that are mutated. + """ + # Name of mutation op to mutated parameter indices + # List from Triton Github include/triton/Dialect/Triton/IR/TritonOps.td + # All the OPs that have MemWrite trait. + # What if Triton exposed this? 
+ MUTATION_OPS = {"tt.store": [0], "tt.atomic_cas": [0], "tt.atomic_rmw": [0]} + # Ops that we want to bail out on + UNKNOWN_OPS = {"tt.elementwise_inline_asm"} + + stack: List[Union[Param, Intermediate]] = [] + visited = set() + ops = functions[fn_name] + for op_list in ops.values(): + for op in op_list: + if op.name in UNKNOWN_OPS: + raise RuntimeError( + f"ttir analysis hit an op we do not know how to analyze: {op.name}" + ) + + if op.name == "tt.call": + assert op.fn_call_name in functions + mutations = analyze_kernel_mutations( + functions, op.fn_call_name, len(op.args) + ) + stack.extend(arg for arg, mutated in zip(op.args, mutations) if mutated) + else: + for idx in MUTATION_OPS.get(op.name, []): + stack.append(op.args[idx]) + + # The following is an iterative DFS algorithm + mutated = [False] * num_args + while stack: + arg = stack.pop() + if arg in visited: + continue + + visited.add(arg) + + if isinstance(arg, Param): + if arg.idx >= num_args: + # This is an argument defined in the kernel, not passed in + continue + mutated[arg.idx] = True + elif isinstance(arg, Intermediate) and not arg.fake(): + for op in ops[arg]: + # Skip arguments to load + if op.name != "tt.load": + stack.extend(op.args) + return mutated + + +def identify_mutated_tensors(kernel, kwargs): + """ + Given a triton kernel and the arguments for this kernel, this function + 1) Retrieves the TTIR converted version of the kernel from Triton's API. 
+ 2) Parses the TTIR and creates a control flow graph + 3) Analyzes the graph to detect all input tensor mutations + """ + + ttir_module = None + functions = None + try: + ttir_module, ordered_tensor_names = generate_ttir(kernel, kwargs) + + # extract functions from TTIR using MLIR bindings exposed by Triton code + functions = ttir_to_functions(ttir_module) + + assert functions is not None + kernel_name = next(iter(functions.keys())) + # Triton codegen modifies the name + assert kernel.fn.__name__ in kernel_name + # Reset the cache between top level invocations + # The cache for analyze kernel mutations is mainly used for cycle + # detection, so each top level invocation needs a clean cache + analyze_kernel_mutations.reset() + mutations = analyze_kernel_mutations( + functions, kernel_name, len(ordered_tensor_names) + ) + + return [ + ordered_tensor_names[i] for i, mutated in enumerate(mutations) if mutated + ] + except Exception as e: + log.warning( + "Encountered an exception in identify_mutated_tensors, assuming every input is mutated", + exc_info=True, + ) + if ttir_module is not None: + log.debug("TTIR:\n%s", str(ttir_module)) + if functions is not None: + log.debug("functions:") + for name, fn in functions.items(): + log.debug("===\t%s\t===", name) + for ret, ops in fn.items(): + log.debug("%s\t=>\t%s", ret, ops) + return [key for key, value in kwargs.items() if isinstance(value, Tensor)] + + +############################################################################### +# Triton Kernel Wrappers + + +# Used for wrapping a Triton Kernel +class TritonKernelWrapperMutation(HigherOrderOperator): + def __init__(self): + super().__init__("triton_kernel_wrapper_mutation") + + +triton_kernel_wrapper_mutation = TritonKernelWrapperMutation() + + +# Used for wrapping a Triton Kernel in a functional manner +class TritonKernelWrapperFunctional(HigherOrderOperator): + def __init__(self): + super().__init__("triton_kernel_wrapper_functional") + + 
+triton_kernel_wrapper_functional = TritonKernelWrapperFunctional() + + +@triton_kernel_wrapper_mutation.py_impl(DispatchKey.CompositeExplicitAutograd) +def triton_kernel_wrapper_mutation_dense( + *, kernel_idx, constant_args_idx, grid, kwargs +): + from torch._inductor.codegen.wrapper import user_defined_kernel_grid_fn_code + + kernel = kernel_side_table.get_kernel(kernel_idx) + constant_args = kernel_side_table.get_constant_args(constant_args_idx) + + if len(grid) == 1: + grid_fn = grid[0] + else: + fn_name, code = user_defined_kernel_grid_fn_code( + kernel.fn.__name__, kernel.configs, grid + ) + namespace: Dict[str, Any] = {} + exec(code, namespace) + grid_fn = namespace[fn_name] + + kernel[grid_fn](**kwargs, **constant_args) + + +@triton_kernel_wrapper_mutation.py_impl(FakeTensorMode) +def triton_kernel_wrapper_mutation_fake_tensor_mode( + mode, *, kernel_idx, constant_args_idx, grid, kwargs +): + with mode: + return None + + +def trace_triton_kernel_wrapper(proxy_mode, func_overload, node_args): + with disable_proxy_modes_tracing(): + out = func_overload(**node_args) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", + func_overload, + (), + proxy_args, + name=func_overload.__name__ + "_proxy", + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@triton_kernel_wrapper_mutation.py_impl(ProxyTorchDispatchMode) +def triton_kernel_wrapper_mutation_proxy_torch_dispatch_mode( + mode, *, kernel_idx, constant_args_idx, grid, kwargs +): + if mode.enable_tracing: + trace_triton_kernel_wrapper( + mode, + triton_kernel_wrapper_mutation, + { + "kernel_idx": kernel_idx, + "constant_args_idx": constant_args_idx, + "grid": grid, + "kwargs": kwargs, + }, + ) + else: + triton_kernel_wrapper_mutation( + kernel_idx=kernel_idx, + constant_args_idx=constant_args_idx, + grid=grid, + kwargs=kwargs, + ) + + return None + + 
+@triton_kernel_wrapper_mutation.py_functionalize_impl +def triton_kernel_wrapper_mutation_functionalize( + ctx, kernel_idx, constant_args_idx, grid, kwargs +): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + kernel = kernel_side_table.get_kernel(kernel_idx) + constant_args = kernel_side_table.get_constant_args(constant_args_idx) + # TODO(oulgen): Preexisting bug, if two kernel inputs are views of each + # other, and one gets mutated in kernel, and later another gets mutated, + # they are no longer equal. Fix this by graph breaking on this condition + # earlier in dynamo. + tensors_to_clone = identify_mutated_tensors( + kernel, {**unwrapped_kwargs, **constant_args} + ) + with ctx.redispatch_to_next(): + unwrapped_outputs = triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + constant_args_idx=constant_args_idx, + grid=grid, + kwargs=unwrapped_kwargs, + tensors_to_clone=tensors_to_clone, + ) + + assert set(unwrapped_outputs.keys()).issubset(set(kwargs.keys())) + for key, output_arg in unwrapped_outputs.items(): + if not isinstance(output_arg, Tensor): + continue + input_arg = kwargs[key] + assert isinstance(input_arg, Tensor) + + ctx.replace(input_arg, output_arg) + # indicate that above replace is hidden from autograd + ctx.mark_mutation_hidden_from_autograd(input_arg) + ctx.commit_update(input_arg) + ctx.sync(input_arg) + return None + + +@triton_kernel_wrapper_functional.py_impl(DispatchKey.CompositeExplicitAutograd) +def triton_kernel_wrapper_functional_dense( + *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone +): + # TODO(oulgen): For performance reasons, we want to ensure that these + # `clone_preserve_strides` calls are never executed at runtime + # (inductor should always optimize them away). 
+ # Requires https://github.com/pytorch/pytorch/issues/109240 + kwargs = { + key: (clone_preserve_strides(val) if key in tensors_to_clone else val) + for key, val in kwargs.items() + } + triton_kernel_wrapper_mutation( + kernel_idx=kernel_idx, + constant_args_idx=constant_args_idx, + grid=grid, + kwargs=kwargs, + ) + return {key: val for key, val in kwargs.items() if key in tensors_to_clone} + + +@triton_kernel_wrapper_functional.py_impl(FakeTensorMode) +def triton_kernel_wrapper_functional_fake_tensor_mode( + mode, *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone +): + # TODO(oulgen): For performance reasons, we want to ensure that these + # `clone_preserve_strides` calls are never executed at runtime + # (inductor should always optimize them away). + # Requires https://github.com/pytorch/pytorch/issues/109240 + with mode: + return { + key: clone_preserve_strides(val) + for key, val in kwargs.items() + if key in tensors_to_clone + } + + +@triton_kernel_wrapper_functional.py_impl(ProxyTorchDispatchMode) +def triton_kernel_wrapper_functional_proxy_torch_dispatch_mode( + mode, *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone +): + if mode.enable_tracing: + return trace_triton_kernel_wrapper( + mode, + triton_kernel_wrapper_functional, + { + "kernel_idx": kernel_idx, + "constant_args_idx": constant_args_idx, + "grid": grid, + "kwargs": kwargs, + "tensors_to_clone": tensors_to_clone, + }, + ) + else: + return triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + grid=grid, + kwargs=kwargs, + tensors_to_clone=tensors_to_clone, + ) + + +@triton_kernel_wrapper_functional.py_functionalize_impl +def triton_kernel_wrapper_functional_functionalize( + ctx, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone +): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + with ctx.redispatch_to_next(): + outputs = triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + constant_args_idx=constant_args_idx, + grid=grid, + 
kwargs=unwrapped_kwargs, + tensors_to_clone=tensors_to_clone, + ) + return ctx.wrap_tensors(outputs) + + +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.ADInplaceOrView) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.BackendSelect) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCPU) + +triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.ADInplaceOrView) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.BackendSelect) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCPU) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b393e7c2345dd1336fc9f9fbdbf01b73567db4 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py @@ -0,0 +1,212 @@ +# mypy: allow-untyped-defs +import functools +from 
contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Callable + +import torch +import torch.fx.traceback as fx_traceback +import torch.utils._pytree as pytree +from torch._ops import HigherOrderOperator +from torch.fx.experimental.proxy_tensor import make_fx +from torch.multiprocessing.reductions import StorageWeakRef + + +@dataclass +class UnsupportedAliasMutationException(RuntimeError): + reason: str + + +def autograd_not_implemented_inner( + operator: HigherOrderOperator, delayed_error: bool, *args: Any, **kwargs: Any +) -> Any: + """If autograd is enabled and any of the arguments require grad this will either + raise an error or return a DelayedError depending on the value of delayed. + + Args: + operator: The HigherOrderOperator to call with the *args and **kwargs with + op_name: The name of the HigherOrderOperator + delayed_error: If True, return a DelayedError instead of raising an error + args: The flattened operands to the HigherOrderOperator + kwargs: The keyword arguments to the HigherOrderOperator + + Raises: + RuntimeError: If autograd is enabled and any of the arguments to the HigherOrderOperator + """ + with torch._C._AutoDispatchBelowAutograd(): + result = operator(*args, **kwargs) + flat_operands = pytree.arg_tree_leaves(*args) + if torch.is_grad_enabled() and any( + f.requires_grad for f in flat_operands if isinstance(f, torch.Tensor) + ): + if delayed_error: + err_fn = torch._C._functions.DelayedError( + f"Autograd not implemented for {str(operator)}", + 1, + ) + + def fake_requires_grad(tensor): + if torch.is_floating_point(tensor) or torch.is_complex(tensor): + tensor = tensor.detach() + tensor.requires_grad = True + return tensor + + return pytree.tree_map_only( + torch.Tensor, lambda x: err_fn(fake_requires_grad(x)), result + ) + else: + raise RuntimeError(f"Autograd not implemented for {str(operator)}") + return result + + +def autograd_not_implemented(op: HigherOrderOperator, deferred_error: bool) -> 
Callable: + def inner(*args, **kwargs): + return autograd_not_implemented_inner(op, deferred_error, *args, **kwargs) + + return inner + + +def _maybe_run_with_interpreter(fn): + maybe_interpreted_fn = fn + if isinstance(fn, torch.fx.GraphModule) and fx_traceback.has_preserved_node_meta(): + # Running graph with interpreter is needed for propagating the stack_trace + def graph_with_interpreter(*args): + with fx_traceback.preserve_node_meta(): + return torch.fx.Interpreter(fn).run(*args) + + maybe_interpreted_fn = graph_with_interpreter + return maybe_interpreted_fn + + +def reenter_make_fx(fn): + from torch.fx.experimental.proxy_tensor import _CURRENT_MAKE_FX_TRACER + + @functools.wraps(fn) + def wrapped(*args): + assert ( + _CURRENT_MAKE_FX_TRACER is not None + ), "Cannot reenter make_fx when we're not under a make_fx tracing session" + return _CURRENT_MAKE_FX_TRACER.trace_subgraph( + _maybe_run_with_interpreter(fn), *args + ) + + return wrapped + + +@contextmanager +def _set_compilation_env(): + _old_is_tracing = torch.fx._symbolic_trace._is_fx_tracing_flag + _old_is_inlining = torch._dynamo.config.inline_inbuilt_nn_modules + try: + # We need to turn off the is_fx_tracing_flag. Remove this flag check from dyanmo + # once we are confident fx tracing works with dynamo. + torch.fx._symbolic_trace._is_fx_tracing_flag = False + + # TODO(anijain2305, export-team) For non-strict export with module + # stack info, the codepatch forces the nn module __getattr__ to + # ProxyAttr __getattr__ downstream. To circumvent the issue for now, + # skip inlining inbuilt nn modules for cond. + torch._dynamo.config.inline_inbuilt_nn_modules = False + yield + finally: + torch.fx._symbolic_trace._is_fx_tracing_flag = _old_is_tracing + torch._dynamo.config.inline_inbuilt_nn_modules = _old_is_inlining + + +def _has_potential_branch_input_mutation(branch, inputs, pre_dispatch=False): + """ + Dispatch-trace the branch with inputs and check if + producing graph has mutable op on the input. 
This is + bit restrictive as the branch must be traceable. + """ + try: + gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs) + except UnsupportedAliasMutationException: + # this can happen when nested cond_op is + # functionalized + return True + except Exception as e: + raise e + + def _detect_input_mutation(gm): + input_nodes = set() + for node in gm.graph.nodes: + if node.op == "placeholder": + input_nodes.add(node) + if node.op == "call_function": + target = node.target + if ( + isinstance(target, torch._ops.OpOverload) + and target._schema.is_mutable + ): + for arg in node.args: + if arg in input_nodes: + return True + + for _, module in gm.named_children(): + if isinstance(module, torch.fx.GraphModule): + if _detect_input_mutation(module): + return True + + return False + + return _detect_input_mutation(gm) + + +def _has_potential_branch_input_alias(branch, inputs, pre_dispatch=False): + """ + Dispatch-trace the branch with inputs and check if + producing graph has output aliasing the branch input. This is + bit restrictive as the branch must be traceable. + """ + try: + gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs) + except UnsupportedAliasMutationException: + # this can happen when nested cond_op is + # functionalized + return True + except Exception as e: + raise e + + def _detect_input_alias(gm): + input_storages = set() + for node in gm.graph.nodes: + # We need to check existence of "val" because we reuse the logic here + # for map operator, where num_mapped_args is a scalar + # and doesn't have a "val" meta. 
+ if node.op == "placeholder" and "val" in node.meta: + input_storages.add(StorageWeakRef(node.meta["val"]._typed_storage())) + if node.op == "output": + + def check_alias(out): + if out is not None and "val" in out.meta: + out_storage = StorageWeakRef(out.meta["val"]._typed_storage()) + return out_storage in input_storages + return False + + if any(pytree.tree_leaves(pytree.tree_map(check_alias, node.args))): + return True + + for _, module in gm.named_children(): + if isinstance(module, torch.fx.GraphModule) and _detect_input_alias(module): + return True + + return False + + return _detect_input_alias(gm) + + +def unique_graph_id(proxy_mode, prefix): + """Returns a unique name and id for a graph to be added to a proxy_mode tracer""" + # There are probably better ways - I know that create_arg has some self incrementing name + # magic to it, but since we explicitly have to get the name for register_module, + # I was not sure how to do that. This kinda simulates it. + next_name = None + i = 0 + while not next_name: + candidate = f"{prefix}_{i}" + if hasattr(proxy_mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + return i, next_name diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..4577036b731f5826817301a5b550376bc4cc5bff --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py @@ -0,0 +1,270 @@ +# mypy: allow-untyped-defs +from typing import Callable, Tuple, Union + +import torch +import torch.utils._pytree as pytree + +from torch._C import DispatchKey + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + _set_compilation_env, + autograd_not_implemented, + reenter_make_fx, + UnsupportedAliasMutationException, +) +from torch._ops import HigherOrderOperator 
+from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree + + +class WhileLoopOp(HigherOrderOperator): + def __init__(self): + super().__init__("while_loop") + + def __call__( + self, + cond_fn: Callable, + body_fn: Callable, + carried_inputs: Tuple[Union[torch.Tensor, int, float, bool]], + additional_inputs: Tuple[Union[torch.Tensor, int, float, bool]], + /, + ): + if not isinstance(carried_inputs, tuple): + raise RuntimeError( + f"carried_inputs must be a tuple, got {type(carried_inputs)}" + ) + if not isinstance(additional_inputs, tuple): + raise RuntimeError( + f"additional_inputs must be a tuple, got {type(additional_inputs)}" + ) + if not all( + isinstance(t, (torch.Tensor, int, float, bool)) for t in carried_inputs + ): + raise RuntimeError( + "carried_inputs must be a tuple of tensors, ints, floats, or bools, got " + f"{carried_inputs}" + ) + + if not all( + isinstance(t, (torch.Tensor, int, float, bool)) for t in additional_inputs + ): + raise RuntimeError( + "additional_inputs must be a tuple of tensors, ints, floats, or bools, got " + f"{additional_inputs}" + ) + return super().__call__(cond_fn, body_fn, carried_inputs, additional_inputs) + + +while_loop_op = WhileLoopOp() +# Override while_loop_op.__module__ to "torch.ops.higher_order" so that in the generated +# graph module, while_loop node's target is correctedly printed as torch.ops.higher_order.while_loop +while_loop_op.__module__ = "torch.ops.higher_order" + + +def while_loop(cond_fn, body_fn, carried_inputs): + r""" + Run body_fn(*carried_inputs) while cond_fn(*carried_inputs) returns a True scalar tensor. Returns the output of body_fn or + initial carried_inputs. + + .. warning:: + `torch.while_loop` is a prototype feature in PyTorch. It has limited support for input and output types and + doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch. 
+ Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype + + `while_loop` is a structured control flow operator. It preserves the loop semantic across the torch.compile and torch.export. + + `while_loop` is equivalent to the following: + + def while_loop(cond_fn, body_fn, carried_inputs): + val = carried_inputs + while cond_fn(*val): + val = body_fn(*val) + return val + + Args: + cond_fn (Callable): A callable function that returns a boolean Scalar tensor. + + body_fn (Callable): A callable function that takes the same inputs as `cond_fn` and returns a tuple of tensors + + carried_inputs (Tuple of possibly nested dict/list/tuple of tensors): A tuple of inputs to cond_fn and body_fn. It's also + the initial value of states that are carried across iterations. + + Example: + + def cond_fn(iter, x): + return iter.sum() < 10 + + def body_fn(iter, x): + return iter + 1, x.sin() + + while_loop(cond_fn, body_fn, (torch.zeros(1), torch.randn(3, 4))) + + Restrictions: + + - body_fn must return tensors with the same metadata (e.g.shape, dtype) as inputs. + + - body_fn and cond_fn must not in-place mutate the carried_inputs. A clone before the mutation is required. + + - body_fn and cond_fn must not mutate python varialbles (e.g. list/dict) created outside of the body_fn. + + - body_fn and cond_fn's output cannot aliase any of the inputs. A clone is required. + + .. warning:: + Temporal Limitations: + + - 'while_loop' only supports **inference** right now. Autograd will be supported in the future. + + """ + + # Currently, additional_inputs is not a user-facing input. It will be automatically set in dynamo. + # parameters and buffers accessed in cond_fn or body_fn or tensor closures will become additional_inputs. 
+ additional_inputs: Tuple = tuple() + if torch.compiler.is_dynamo_compiling(): + return while_loop_op(cond_fn, body_fn, carried_inputs, additional_inputs) + + def _validate_input(cond_fn, body_fn, carried_inputs): + if not callable(cond_fn) or not callable(body_fn): + raise RuntimeError("Expect cond_fn and body_fn to be callbale.") + + if not isinstance(carried_inputs, (tuple, list)) or pytree.tree_any( + lambda t: not isinstance(t, torch.Tensor), carried_inputs + ): + raise RuntimeError( + "Expect carried_inputs to be a tuple of possibly nested dict/list/tuple that only" + f"consists of tensor leaves, but got {carried_inputs}." + ) + + _validate_input(cond_fn, body_fn, carried_inputs) + + with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit(): + return torch.compile(while_loop_op, backend="eager", fullgraph=True)( + cond_fn, body_fn, carried_inputs, additional_inputs + ) + + +@while_loop_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def while_loop_dense(cond_fn, body_fn, carried_inputs, additional_inputs): + carried_vals = carried_inputs + + def _is_boolean_scalar_tensor(pred): + return ( + isinstance(pred, torch.Tensor) + and pred.size() == torch.Size([]) + and pred.dtype == torch.bool + ) + + if not isinstance(carried_inputs, tuple): + raise RuntimeError( + f"carried_inputs must be a tuple but got {type(carried_inputs)}" + ) + + while pred := cond_fn(*carried_vals, *additional_inputs): + if not _is_boolean_scalar_tensor(pred): + raise RuntimeError( + f"cond_fn must return a boolean scalar tensor but got {pred}" + ) + out = body_fn(*carried_vals, *additional_inputs) + assert isinstance( + out, tuple + ), f"body_fn should return a tuple but got {type(out)}" + assert len(out) == len( + carried_inputs + ), "body_fn should return the same number of elements as carried_inputs" + carried_vals = out + return carried_vals + + +while_loop_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(while_loop_op, deferred_error=True) +) + + 
+@while_loop_op.py_impl(ProxyTorchDispatchMode) +def while_loop_tracing(mode, cond_fn, body_fn, carried_inputs, additional_inputs): + def _trace_while_loop( + proxy_mode, while_loop_op, cond_fn, body_fn, carried_inputs, additional_inputs + ): + cond_graph = reenter_make_fx(cond_fn)(*carried_inputs, *additional_inputs) + body_graph = reenter_make_fx(body_fn)(*carried_inputs, *additional_inputs) + + next_name = None + i = 0 + while not next_name: + candidate = f"while_loop_cond_graph_{i}" + if hasattr(proxy_mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + cond_graph_name = next_name + body_graph_name = f"while_loop_body_graph_{i}" + assert not hasattr(proxy_mode.tracer.root, body_graph_name) + + proxy_mode.tracer.root.register_module(cond_graph_name, cond_graph) + proxy_mode.tracer.root.register_module(body_graph_name, body_graph) + + args = (cond_graph, body_graph, carried_inputs, additional_inputs) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) + + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", while_loop_op, proxy_args, {}, name="while_loop" + ) + + # body_fn return output with the same pytree and tensor meta data as carried_inputs + # so we could just return the output after one iteration. 
+ out = body_fn(*carried_inputs, *additional_inputs) + return track_tensor_tree( + out, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + if mode.enable_tracing: + return _trace_while_loop( + mode, while_loop_op, cond_fn, body_fn, carried_inputs, additional_inputs + ) + else: + return while_loop_op(cond_fn, body_fn, carried_inputs, additional_inputs) + + +@while_loop_op.py_impl(FakeTensorMode) +def while_loop_fake_tensor_mode( + mode, cond_fn, body_fn, carried_inputs, additional_inputs +): + with mode: + return body_fn(*carried_inputs, *additional_inputs) + + +@while_loop_op.py_functionalize_impl +def while_loop_func(ctx, cond_fn, body_fn, carried_inputs, additional_inputs): + unwrapped_carried_inputs = ctx.unwrap_tensors(carried_inputs) + unwrapped_additional_inputs = ctx.unwrap_tensors(additional_inputs) + unwrapped_inputs = unwrapped_carried_inputs + unwrapped_additional_inputs + with ctx.redispatch_to_next() as m: + functional_cond_fn = ctx.functionalize(cond_fn) + functional_body_fn = ctx.functionalize(body_fn) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + for fn, fn_name in [ + (functional_cond_fn, "cond_fn"), + (functional_body_fn, "body_fn"), + ]: + if _has_potential_branch_input_mutation( + fn, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + f"torch.while_loop's {fn_name} might be modifying the input!" + ) + + if _has_potential_branch_input_alias( + fn, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + f"torch.while_loop's {fn_name} might be aliasing the input!" 
+ ) + ret = while_loop_op( + functional_cond_fn, + functional_body_fn, + unwrapped_carried_inputs, + unwrapped_additional_inputs, + ) + return ctx.wrap_tensors(ret) diff --git a/valley/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py new file mode 100644 index 0000000000000000000000000000000000000000..6d83a44e752a06be9333ce0ccb6c306d9e65d6cf --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py @@ -0,0 +1,184 @@ +# mypy: allow-untyped-defs +import inspect +import logging + +import torch +from torch._ops import HigherOrderOperator +from torch.utils.checkpoint import checkpoint, uid +import torch._dynamo.config + +log = logging.getLogger(__name__) + + + +# Used for testing the HigherOrderOperator mechanism +class Wrap(HigherOrderOperator): + def __init__(self): + super().__init__("wrap") + + def __call__(self, func, *args, **kwargs): + # Dynamo already traces the body of HigherOrderOp beforehand when it + # so no need to trace into it. + import torch._dynamo # noqa: F401 + from torch._dynamo import disable + + @disable + def wrapper(): + result = func(*args, **kwargs) + return result + + return wrapper() + +wrap = Wrap() + +class WrapWithSetGradEnabled(HigherOrderOperator): + def __init__(self): + super().__init__("wrap_with_set_grad_enabled") + + def __call__(self, enable_grad, wrapped_func, *args, **kwargs): + # Dynamo already traces the body of HigherOrderOp beforehand when it + # so no need to trace into it. + import torch._dynamo # noqa: F401 + from torch._dynamo import disable + + @disable + def wrapper(): + with torch.set_grad_enabled(enable_grad): + return wrapped_func(*args, **kwargs) + return wrapper() + +wrap_with_set_grad_enabled = WrapWithSetGradEnabled() + +class WrapActivationCheckpoint(HigherOrderOperator): + """ + This operator is used to wrap torch.utils.checkpoint. 
This avoids + TorchDynamo to look into saved tensor hooks and directly passes the control + to AOT Autograd, which is ok with tracing saved tensor hooks. As a result of + AOT tracing torch.utils.checkpoint code, we have a backward graph with + recomputed forward nodes. + + However, we might deprecate this operator soon. The difficulty arises in the + functionalization of rng ops. Today, there are two different + functionalization of rng ops - one at AOT autograd and other at Inductor. + And they are difficult to map to each other. The rng states also complicate + pattern matching in Inductor. Due to the ease of implementation, we are + currently inclined towards functionalization at Inductor level, which means + that duplication/recomputation is done as a compiler pass in the + partitioners. See TagActivationCheckpoint for more information. + """ + def __init__(self): + super().__init__("wrap_activation_checkpoint") + + def __call__(self, function, *args, **kwargs): + # use_reentrant is set to False because this op is going to be traced. + # And we ensure that AOT Autograd traces through the non reentrant + # version of checkpointing. + import torch.fx.traceback as fx_traceback + from torch.fx import Interpreter + kwargs["use_reentrant"] = False + kwargs["preserve_rng_state"] = False + # Using interpreter allows preservation of metadata through torch.compile stack. + with fx_traceback.preserve_node_meta(): + return checkpoint(Interpreter(function).run, *args, **kwargs) + +wrap_activation_checkpoint = WrapActivationCheckpoint() + +class TagActivationCheckpoint(HigherOrderOperator): + """ + This operator is supposed to be used only with torch.compile stack. This + accepts a Fx graph module which needs to be checkpointed. This operator adds + "recomputable" tag to the nodes of the Fx graph that should be recomputed. + + The goal is to: + 1. Avoid using Dynamo to trace through saved tensor hooks. + 2. 
For selective checkpointing case, let AOTAutograd trace through + saved tensor hooks but has special logic with TorchDispatchMode to override + the usual saved_tensor_hooks fn logic in order to tag the nodes. + 3. Rely on the partitioners to actually duplicate the nodes. + This sits well in the torch.compile stack, because by the time graph + reaches partitioner, inductor has already run its functionalization of rng + ops (by setting fixed seed for each random op, see `replace_random_passes`). + Therefore, the duplication of nodes, by design, respects the rng states in + the forward and recomputed forward in backward. + """ + + def __init__(self): + super().__init__("tag_activation_checkpoint") + + @staticmethod + def divide_kwargs(kwargs): + """ + checkpoint fn can have mixed kwargs between checkpointed fn and + checkpoint fn itself. For example + >> def gn(x, y, z=None): + >> a = torch.matmul(x, y) + >> if z is not None: + >> return torch.matmul(a, z) + >> return a + >> def fn(x, y, z): + >> return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z)) + In the above case, z belongs to checkpointed function gn, but + use_reentrant belongs to the checkpoint function. This function splits + the kwargs into checkpoint_kwargs and gmod_kwargs (or + checkpointed_fn_kwargs). + We do sorting to ensure same graph from run to run for better + debuggability. It is not required for correctness. 
+ """ + ckpt_signature = inspect.signature(checkpoint) + checkpoint_keys = set() + for name in ckpt_signature.parameters: + if name in ("function", "args", "kwargs"): + continue + checkpoint_keys.add(name) + + # `preserve_rng_state` is not a regular kwarg + checkpoint_keys.add("preserve_rng_state") + + checkpoint_kwargs = {name: kwargs[name] for name in kwargs.keys() if name in checkpoint_keys} + gmod_kwargs = {name: kwargs[name] for name in kwargs.keys() if name not in checkpoint_keys} + return checkpoint_kwargs, gmod_kwargs + + def tag_nodes(self, gmod): + unique_graph_id = next(uid) + for node in gmod.graph.nodes: + if node.op in ("call_function", "call_method", "call_module"): + node.meta["recompute"] = unique_graph_id + return gmod + + def __call__(self, gmod, *args, **kwargs): + import torch.fx.traceback as fx_traceback + from torch.fx import Interpreter + if "_checkpoint_context_fn" in gmod.meta: + assert torch._dynamo.config._experimental_support_context_fn_in_torch_utils_checkpoint, \ + "Passing context_fn to torch.utils.checkpoint is currently not supported under torch.compile" + log.warning(""" +Detected that context_fn is passed to torch.utils.checkpoint under torch.compile. +Please make sure the checkpointed region does not contain in-place ops (e.g. torch.relu_). +""") + # use_reentrant is set to False because this op is going to be traced. + # And we ensure that AOT Autograd traces through the non reentrant + # version of checkpointing. + kwargs["use_reentrant"] = False + # preserve_rng_state is set to False because we want to prevent AOTAutograd from tracing through + # `torch.random.fork_rng` op (which is not supported yet under CUDA). + # This doesn't mean that we don't preserve RNG state. Instead, we will always preserve RNG state + # regardless of this flag (by doing RNG functionalization via `replace_random_passes` in Inductor + # instead of in AOTAutograd). 
+ kwargs["preserve_rng_state"] = False + kwargs["context_fn"] = gmod.meta["_checkpoint_context_fn"] + # We first tag all nodes as "recompute" in this graph, and then we undo the "recompute" tag + # for specific nodes in _CachingTorchDispatchMode in torch/utils/checkpoint.py. + gmod = self.tag_nodes(gmod) + # Using interpreter allows preservation of metadata through torch.compile stack. + with fx_traceback.preserve_node_meta(): + return checkpoint(Interpreter(gmod).run, *args, **kwargs) + else: + gmod = self.tag_nodes(gmod) + # Using interpreter allows preservation of metadata through torch.compile stack. + # TODO: We want to use the same `checkpoint(Interpreter(gmod).run, *args, **kwargs)` here + # as the `context_fn != None` case, but that depends on in-place op support in TorchDispatchMode + torch.compile. + # (for details on in-place op issue, run `test_compile_selective_checkpoint_inplace_op` unit test) + with fx_traceback.preserve_node_meta(): + return Interpreter(gmod).run(*args) + +tag_activation_checkpoint = TagActivationCheckpoint() diff --git a/valley/lib/python3.10/site-packages/torch/futures/__init__.py b/valley/lib/python3.10/site-packages/torch/futures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e1623c44f193d74de2a8d099699cd3439a1f1227 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/futures/__init__.py @@ -0,0 +1,319 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union + +import torch + +__all__ = ['Future', 'collect_all', 'wait_all'] + +T = TypeVar("T") +S = TypeVar("S") + + +class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef] + pass + + +class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta): + r""" + Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous + execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. 
It + also exposes a set of APIs to add callback functions and set results. + + .. warning:: GPU support is a beta feature, subject to changes. + """ + + def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None): + r""" + Create an empty unset ``Future``. If the future is intended to hold + values containing CUDA tensors, (a superset of) their CUDA devices must + be specified at construction. (This is only supported if + ``torch.cuda.is_available()`` returns ``True``). This is needed to + ensure proper CUDA stream synchronization. The child futures, returned + by the ``then`` method, will inherit these devices. + + Args: + devices(``List[Union[int, str, torch.device]]``, optional): the set + of devices on which tensors contained in this future's value are + allowed to reside and on which callbacks are allowed to operate. + """ + if devices is None: + devices = [] + super().__init__([torch.device(d) for d in devices]) + + def done(self) -> bool: + r""" + Return ``True`` if this ``Future`` is done. A ``Future`` is done if it + has a result or an exception. + + If the value contains tensors that reside on GPUs, ``Future.done()`` + will return ``True`` even if the asynchronous kernels that are + populating those tensors haven't yet completed running on the device, + because at such stage the result is already usable, provided one + performs the appropriate synchronizations (see :meth:`wait`). + """ + return super().done() + + def wait(self) -> T: + r""" + Block until the value of this ``Future`` is ready. + + If the value contains tensors that reside on GPUs, then an additional + synchronization is performed with the kernels (executing on the device) + which may be asynchronously populating those tensors. 
Such sync is + non-blocking, which means that ``wait()`` will insert the necessary + instructions in the current streams to ensure that further operations + enqueued on those streams will be properly scheduled after the async + kernels but, once that is done, ``wait()`` will return, even if those + kernels are still running. No further synchronization is required when + accessing and using the values, as long as one doesn't change streams. + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``wait`` method will + also throw an error. + """ + return super().wait() + + def value(self) -> T: + r""" + Obtain the value of an already-completed future. + + This method should only be called after a call to :meth:`wait` has + completed, or inside a callback function passed to :meth:`then`. In + other cases this ``Future`` may not yet hold a value and calling + ``value()`` could fail. + + If the value contains tensors that reside on GPUs, then this method will + *not* perform any additional synchronization. This should be done + beforehand, separately, through a call to :meth:`wait` (except within + callbacks, for which it's already being taken care of by :meth:`then`). + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``value()`` method will + also throw an error. + """ + return super().value() + + def then(self, callback: Callable[[Future[T]], S]) -> Future[S]: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed (to enforce a certain order consider chaining: + ``fut.then(cb1).then(cb2)``). The callback must take one argument, which + is the reference to this ``Future``. 
The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run immediately inline. + + If the ``Future``'s value contains tensors that reside on GPUs, the + callback might be invoked while the async kernels that are populating + those tensors haven't yet finished executing on the device. However, the + callback will be invoked with some dedicated streams set as current + (fetched from a global pool) which will be synchronized with those + kernels. Hence any operation performed by the callback on these tensors + will be scheduled on the device after the kernels complete. In other + words, as long as the callback doesn't switch streams, it can safely + manipulate the result without any additional synchronization. This is + similar to the non-blocking behavior of :meth:`wait`. + + Similarly, if the callback returns a value that contains tensors that + reside on a GPU, it can do so even if the kernels that are producing + these tensors are still running on the device, as long as the callback + didn't change streams during its execution. If one wants to change + streams, one must be careful to re-synchronize them with the original + streams, that is, those that were current when the callback was invoked. + + Args: + callback(``Callable``): a ``Callable`` that takes this ``Future`` as + the only argument. + + Returns: + A new ``Future`` object that holds the return value of the + ``callback`` and will be marked as completed when the given + ``callback`` finishes. + + .. note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, the + future returned by ``then`` will be marked appropriately with the + encountered error. 
However, if this callback later completes + additional futures, those futures are not marked as completed with + an error and the user is responsible for handling completion/waiting + on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print(f"RPC return value is {fut.wait()}.") + >>> fut = torch.futures.Future() + >>> # The inserted callback will print the return value when + >>> # receiving the response from "worker1" + >>> cb_fut = fut.then(callback) + >>> chain_cb_fut = cb_fut.then( + ... lambda x : print(f"Chained cb done. {x.wait()}") + ... ) + >>> fut.set_result(5) + RPC return value is 5. + Chained cb done. None + """ + return cast(Future[S], super().then(callback)) + + def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed. The callback must take one argument, which is the + reference to this ``Future``. The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run inline. + + We recommend that you use the :meth:`then` method as it provides a way + to synchronize after your callback has completed. ``add_done_callback`` + can be cheaper if your callback does not return anything. But both + :meth:`then` and ``add_done_callback`` use the same callback + registration API under the hood. + + With respect to GPU tensors, this method behaves in the same way as + :meth:`then`. + + Args: + callback(``Future``): a ``Callable`` that takes in one argument, + which is the reference to this ``Future``. + + .. 
note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, + error handling must be carefully taken care of. For example, if + this callback later completes additional futures, those futures are + not marked as completed with an error and the user is responsible + for handling completion/waiting on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print("This will run after the future has finished.") + ... print(fut.wait()) + >>> fut = torch.futures.Future() + >>> fut.add_done_callback(callback) + >>> fut.set_result(5) + This will run after the future has finished. + 5 + """ + super().add_done_callback(callback) + + def set_result(self, result: T) -> None: + r""" + Set the result for this ``Future``, which will mark this ``Future`` as + completed and trigger all attached callbacks. Note that a ``Future`` + cannot be marked completed twice. + + If the result contains tensors that reside on GPUs, this method can be + called even if the asynchronous kernels that are populating those + tensors haven't yet completed running on the device, provided that the + streams on which those kernels were enqueued are set as the current ones + when this method is called. Put simply, it's safe to call this method + immediately after launching those kernels, without any additional + synchronization, as long as one doesn't change streams in between. This + method will record events on all the relevant current streams and will + use them to ensure proper scheduling for all the consumers of this + ``Future``. + + Args: + result (object): the result object of this ``Future``. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> import threading + >>> import time + >>> def slow_set_future(fut, value): + ... time.sleep(0.5) + ... 
fut.set_result(value) + >>> fut = torch.futures.Future() + >>> t = threading.Thread( + ... target=slow_set_future, + ... args=(fut, torch.ones(2) * 3) + ... ) + >>> t.start() + >>> print(fut.wait()) + tensor([3., 3.]) + >>> t.join() + """ + super().set_result(result) + + def set_exception(self, result: T) -> None: + r""" + Set an exception for this ``Future``, which will mark this ``Future`` as + completed with an error and trigger all attached callbacks. Note that + when calling wait()/value() on this ``Future``, the exception set here + will be raised inline. + + Args: + result (BaseException): the exception for this ``Future``. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> fut = torch.futures.Future() + >>> fut.set_exception(ValueError("foo")) + >>> fut.wait() + Traceback (most recent call last): + ... + ValueError: foo + """ + assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception." + + def raise_error(fut_result): + raise fut_result + + super()._set_unwrap_func(raise_error) + self.set_result(result) # type: ignore[arg-type] + + +def collect_all(futures: List[Future]) -> Future[List[Future]]: + r""" + Collects the provided :class:`~torch.futures.Future` objects into a single + combined :class:`~torch.futures.Future` that is completed when all of the + sub-futures are completed. + + Args: + futures (list): a list of :class:`~torch.futures.Future` objects. + + Returns: + Returns a :class:`~torch.futures.Future` object to a list of the passed + in Futures. 
+ + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> fut0 = torch.futures.Future() + >>> fut1 = torch.futures.Future() + >>> fut = torch.futures.collect_all([fut0, fut1]) + >>> fut0.set_result(0) + >>> fut1.set_result(1) + >>> fut_list = fut.wait() + >>> print(f"fut0 result = {fut_list[0].wait()}") + fut0 result = 0 + >>> print(f"fut1 result = {fut_list[1].wait()}") + fut1 result = 1 + """ + return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures))) + + +def wait_all(futures: List[Future]) -> List: + r""" + Waits for all provided futures to be complete, and returns + the list of completed values. If any of the futures encounters an error, + the method will exit early and report the error not waiting for other + futures to complete. + + Args: + futures (list): a list of :class:`~torch.futures.Future` object. + + Returns: + A list of the completed :class:`~torch.futures.Future` results. This + method will throw an error if ``wait`` on any + :class:`~torch.futures.Future` throws. 
+ """ + return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()] diff --git a/valley/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ebf2b49f2ca888f79977b754ab5112d2b3fa962 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..775fb9126d7747c07b7cd8c534cec0c50573b81d Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d54dcfabb5b85737e4388dd9cdfb93c23c8173ed Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4baf4b1ed98a7aa8c1c2c43c9f8a13da6d50c1a9 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc 
b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b368686984dd5eb38868cb7e8ff28288e4553a5 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c05a14965ea6f6929d7bd08d99eee0cad51717c Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe95807ea272662cf62c591f014ed03e8d25eb3 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f2d6ba5ad3a2d7af17ca69b2cfd3855b4c90bc6 Binary files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..727e601e21e06afe1a87c48484a1dbb94d9cf4b0 Binary 
files /dev/null and b/valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc differ diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/_functions.py b/valley/lib/python3.10/site-packages/torch/nn/parallel/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..d987ed2bc427462bf517665bda8e1889d5a58c90 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/nn/parallel/_functions.py @@ -0,0 +1,126 @@ +import warnings + +import torch +from . import comm +from torch.autograd import Function +from torch._utils import _get_device_index +from typing import List, Optional + + +class Broadcast(Function): + + @staticmethod + def forward(ctx, target_gpus, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Broadcast function not implemented for CPU tensors' + ) + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.target_gpus = target_gpus + if len(inputs) == 0: + return tuple() + ctx.num_inputs = len(inputs) + ctx.input_device = inputs[0].get_device() + outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus) + non_differentiables = [] + for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]): + if not input_requires_grad: + for output in outputs: + non_differentiables.append(output[idx]) + ctx.mark_non_differentiable(*non_differentiables) + return tuple([t for tensors in outputs for t in tensors]) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs) + + +class ReduceAddCoalesced(Function): + + @staticmethod + def forward(ctx, destination, num_inputs, *grads): + ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)] + + grads_ = [grads[i:i + num_inputs] + for i in range(0, len(grads), num_inputs)] + return comm.reduce_add_coalesced(grads_, destination) + + @staticmethod + def backward(ctx, *grad_outputs): + return 
(None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs) + + +class Gather(Function): + + @staticmethod + def forward(ctx, target_device, dim, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Gather function not implemented for CPU tensors' + ) + if (target_device == 'cpu'): + ctx.target_device = 'cpu' + else: + target_device = _get_device_index(target_device, True) + ctx.target_device = target_device + ctx.dim = dim + ctx.input_gpus = tuple(i.get_device() for i in inputs) + if all(t.dim() == 0 for t in inputs) and dim == 0: + inputs = tuple(t.view(1) for t in inputs) + warnings.warn('Was asked to gather along dimension 0, but all ' + 'input tensors were scalars; will instead unsqueeze ' + 'and return a vector.') + ctx.unsqueezed_scalar = True + else: + ctx.unsqueezed_scalar = False + ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs) + return comm.gather(inputs, ctx.dim, ctx.target_device) + + @staticmethod + def backward(ctx, grad_output): + scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output) + if ctx.unsqueezed_scalar: + scattered_grads = tuple(g[0] for g in scattered_grads) + return (None, None) + scattered_grads + + +class Scatter(Function): + + @staticmethod + def forward(ctx, target_gpus, chunk_sizes, dim, input): + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.dim = dim + ctx.input_device = input.get_device() if input.device.type != "cpu" else -1 + streams = None + if torch.cuda.is_available() and ctx.input_device == -1: + # Perform CPU to GPU copies in a background stream + streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus] + outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams) + # Synchronize with the copy stream + if streams is not None: + for i, output in enumerate(outputs): + with torch.cuda.device(target_gpus[i]): + main_stream = torch.cuda.current_stream() + main_stream.wait_stream(streams[i]) + 
output.record_stream(main_stream) + return outputs + + @staticmethod + def backward(ctx, *grad_output): + return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output) + + +# background streams used for copying +_streams: Optional[List[Optional[torch.Stream]]] = None + +def _get_stream(device: torch.device): + """Get a background stream for copying between CPU and target device.""" + global _streams + if device.type == "cpu": + return None + device_mod = getattr(torch, device.type, None) + if device_mod is None: + return None + if _streams is None: + _streams = [None] * device_mod.device_count() + if _streams[device.index] is None: + _streams[device.index] = device_mod.Stream(device.index) + return _streams[device.index] diff --git a/valley/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py b/valley/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..3980706a932a19f5e6ee6ee709a27a0d02f9f0d5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py @@ -0,0 +1,270 @@ +# mypy: allow-untyped-defs +import operator +import torch +import warnings +from itertools import chain +from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union +from ..modules import Module +from .scatter_gather import scatter_kwargs, gather +from .replicate import replicate +from .parallel_apply import parallel_apply +from torch._utils import ( + _get_all_device_indices, + _get_available_device_type, + _get_device_index, + _get_devices_properties +) + +__all__ = ['DataParallel', 'data_parallel'] + +def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None: + imbalance_warn = """ + There is an imbalance between your GPUs. You may want to exclude GPU {} which + has less than 75% of the memory or cores of GPU {}. 
def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None:
    """Warn once if the selected GPUs are badly unbalanced.

    A device with less than 75% of the memory or multiprocessor count of the
    strongest device triggers a warning; memory is checked first and at most
    one warning is emitted.
    """
    imbalance_warn = """
    There is an imbalance between your GPUs. You may want to exclude GPU {} which
    has less than 75% of the memory or cores of GPU {}. You can do so by setting
    the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
    environment variable."""
    device_ids = [_get_device_index(x, True) for x in device_ids]
    dev_props = _get_devices_properties(device_ids)

    def warn_imbalance(get_prop):
        # Compare the weakest and strongest device on a single property.
        values = [get_prop(props) for props in dev_props]
        min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
        max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
        if min_val / max_val < 0.75:
            warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
            return True
        return False

    if warn_imbalance(lambda props: props.total_memory):
        return
    if warn_imbalance(lambda props: props.multi_processor_count):
        return


T = TypeVar("T", bound=Module)


class DataParallel(Module, Generic[T]):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given :attr:`module` by
    splitting the input across the specified devices by chunking in the batch
    dimension (other objects will be copied once per device). In the forward
    pass, the module is replicated on each device, and each replica handles a
    portion of the input. During the backwards pass, gradients from each replica
    are summed into the original module.

    The batch size should be larger than the number of GPUs used.

    .. warning::
        It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
        instead of this class, to do multi-GPU training, even if there is only a single
        node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel but some types are specially handled. tensors will be
    **scattered** on dim specified (default 0). tuple, list and dict types will
    be shallow copied. The other types will be shared among different threads
    and can be corrupted if written to in the model's forward pass.

    The parallelized :attr:`module` must have its parameters and buffers on
    ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
    module.

    .. warning::
        In each forward, :attr:`module` is **replicated** on each device, so any
        updates to the running module in ``forward`` will be lost. For example,
        if :attr:`module` has a counter attribute that is incremented in each
        ``forward``, it will always stay at the initial value because the update
        is done on the replicas which are destroyed after ``forward``. However,
        :class:`~torch.nn.DataParallel` guarantees that the replica on
        ``device[0]`` will have its parameters and buffers sharing storage with
        the base parallelized :attr:`module`. So **in-place** updates to the
        parameters or buffers on ``device[0]`` will be recorded. E.g.,
        :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
        rely on this behavior to update the buffers.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        will be invoked ``len(device_ids)`` times, each with inputs located on
        a particular device. Particularly, the hooks are only guaranteed to be
        executed in correct order with respect to operations on corresponding
        devices. For example, it is not guaranteed that hooks set via
        :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
        `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
        that each such hook be executed before the corresponding
        :meth:`~torch.nn.Module.forward` call of that device.

    .. warning::
        When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
        :func:`forward`, this wrapper will return a vector of length equal to
        number of devices used in data parallelism, containing the result from
        each device.

    .. note::
        There is a subtlety in using the
        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
        :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
        See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
        details.


    Args:
        module (Module): module to be parallelized
        device_ids (list of int or torch.device): CUDA devices (default: all devices)
        output_device (int or torch.device): device location of output (default: device_ids[0])

    Attributes:
        module (Module): the module to be parallelized

    Example::

        >>> # xdoctest: +SKIP
        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)  # input_var can be on any device, including CPU
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(
        self,
        module: T,
        device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
        output_device: Optional[Union[int, torch.device]] = None,
        dim: int = 0,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
        device_type = _get_available_device_type()
        if device_type is None:
            # No accelerator available: degrade to a transparent wrapper.
            self.module = module
            self.device_ids = []
            return

        if device_ids is None:
            device_ids = _get_all_device_indices()

        if device_ids is None:
            raise RuntimeError("no available devices were found")

        if output_device is None:
            output_device = device_ids[0]

        self.dim = dim
        self.module = module
        self.device_ids = [_get_device_index(x, True) for x in device_ids]
        self.output_device = _get_device_index(output_device, True)
        # Parameters/buffers must live on this device; checked on every forward.
        self.src_device_obj = torch.device(device_type, self.device_ids[0])

        if device_type == "cuda":
            _check_balance(self.device_ids)

        if len(self.device_ids) == 1:
            self.module.to(self.src_device_obj)

    def forward(self, *inputs: Any, **kwargs: Any) -> Any:
        with torch.autograd.profiler.record_function("DataParallel.forward"):
            if not self.device_ids:
                # Constructed with no available devices: run the module as-is.
                return self.module(*inputs, **kwargs)

            for t in chain(self.module.parameters(), self.module.buffers()):
                if t.device != self.src_device_obj:
                    raise RuntimeError("module must have its parameters and buffers "
                                       f"on device {self.src_device_obj} (device_ids[0]) but found one of "
                                       f"them on device: {t.device}")

            inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
            # for forward function without any inputs, empty list and dict will be created
            # so the module can be executed on one device which is the first one in device_ids
            if not inputs and not module_kwargs:
                inputs = ((),)
                module_kwargs = ({},)

            if len(self.device_ids) == 1:
                return self.module(*inputs[0], **module_kwargs[0])
            # Replicate only for as many chunks as scatter actually produced.
            replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
            outputs = self.parallel_apply(replicas, inputs, module_kwargs)
            return self.gather(outputs, self.output_device)

    def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]:
        # Detach replicas when grad is disabled (inference).
        return replicate(module, device_ids, not torch.is_grad_enabled())

    def scatter(
        self,
        inputs: Tuple[Any, ...],
        kwargs: Optional[Dict[str, Any]],
        device_ids: Sequence[Union[int, torch.device]],
    ) -> Any:
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]:
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any:
        return gather(outputs, output_device, dim=self.dim)


def data_parallel(
    module: Module,
    inputs: Any,
    device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
    output_device: Optional[Union[int, torch.device]] = None,
    dim: int = 0,
    module_kwargs: Optional[Any] = None,
) -> torch.Tensor:
    r"""Evaluate module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module (Module): the module to evaluate in parallel
        inputs (Tensor): inputs to the module
        device_ids (list of int or torch.device): GPU ids on which to replicate module
        output_device (int or torch.device): GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
        dim (int): dimension along which to scatter the inputs (default: 0)
        module_kwargs (dict, optional): keyword arguments scattered alongside ``inputs``

    Returns:
        a Tensor containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,) if inputs is not None else ()

    device_type = _get_available_device_type()

    if device_type is None:
        raise RuntimeError("device type could not be determined")

    if device_ids is None:
        device_ids = _get_all_device_indices()

    if device_ids is None:
        raise RuntimeError("no available devices were found")

    if output_device is None:
        output_device = device_ids[0]

    device_ids = [_get_device_index(x, True) for x in device_ids]
    output_device = _get_device_index(output_device, True)
    src_device_obj = torch.device(device_type, device_ids[0])

    # Mirror DataParallel.forward: parameters/buffers must live on device_ids[0].
    for t in chain(module.parameters(), module.buffers()):
        if t.device != src_device_obj:
            raise RuntimeError("module must have its parameters and buffers "
                               f"on device {src_device_obj} (device_ids[0]) but found one of "
                               f"them on device: {t.device}")

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # for module without any inputs, empty list and dict will be created
    # so the module can be executed on one device which is the first one in device_ids
    if not inputs and not module_kwargs:
        inputs = ((),)
        module_kwargs = ({},)

    assert module_kwargs is not None

    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
import threading
import torch
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from ..modules import Module
from torch.cuda._utils import _get_device_index
from torch._utils import ExceptionWrapper

__all__ = ['get_a_var', 'parallel_apply']

def get_a_var(obj: Union[torch.Tensor, List[Any], Tuple[Any, ...], Dict[Any, Any]]) -> Optional[torch.Tensor]:
    # Depth-first search for the first tensor inside a (possibly nested)
    # list/tuple/dict; used to infer a device when none was given explicitly.
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, (list, tuple)):
        for result in map(get_a_var, obj):
            if isinstance(result, torch.Tensor):
                return result
    if isinstance(obj, dict):
        for result in map(get_a_var, obj.items()):
            if isinstance(result, torch.Tensor):
                return result
    return None

def parallel_apply(
    modules: Sequence[Module],
    inputs: Sequence[Any],
    kwargs_tup: Optional[Sequence[Dict[str, Any]]] = None,
    devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None,
) -> List[Any]:
    r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs), f'The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}'
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        # No kwargs supplied: one empty dict per module.
        kwargs_tup = (cast(Dict[str, Any], {}),) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = [_get_device_index(x, True) for x in devices]
    streams = [torch.cuda.current_stream(x) for x in devices]
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad/autocast modes so each worker thread can
    # replicate them (thread-local state does not propagate automatically).
    grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()

    def _worker(
        i: int,
        module: Module,
        input: Any,
        kwargs: Dict[str, Any],
        device: Optional[Union[int, torch.device]] = None,
        stream: Optional[torch.cuda.Stream] = None,
    ) -> None:
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            # Fall back to the device of the first tensor found in the input.
            t = get_a_var(input)
            if t is None:
                with lock:
                    results[i] = ExceptionWrapper(
                        where=f"in replica {i}, no device was provided and no tensor input was found; "
                        "device cannot be resolved")
                return
            device = t.get_device()
        if stream is None:
            stream = torch.cuda.current_stream(device)
        try:
            with torch.cuda.device(device), torch.cuda.stream(
                stream
            ), torch.amp.autocast("cuda", enabled=autocast_enabled):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                output = module(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception (with traceback) so the caller's thread
            # can re-raise it after all workers have finished.
            with lock:
                results[i] = ExceptionWrapper(
                    where=f"in replica {i} on device {device}")

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device, stream))
                   for i, (module, input, kwargs, device, stream) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices, streams))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no thread needed.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            # Propagate the first captured worker exception in input order.
            output.reraise()
        outputs.append(output)
    return outputs
" + "If you are a developer and working on the comparison functions, please `raise ErrorMeta.to_error()` " + "for user facing errors." + ) + self.type = type + self.msg = msg + self.id = id + + def to_error( + self, msg: Optional[Union[str, Callable[[str], str]]] = None + ) -> Exception: + if not isinstance(msg, str): + generated_msg = self.msg + if self.id: + generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}" + + msg = msg(generated_msg) if callable(msg) else generated_msg + + return self.type(msg) + + +# Some analysis of tolerance by logging tests from test_torch.py can be found in +# https://github.com/pytorch/pytorch/pull/32538. +# {dtype: (rtol, atol)} +_DTYPE_PRECISIONS = { + torch.float16: (0.001, 1e-5), + torch.bfloat16: (0.016, 1e-5), + torch.float32: (1.3e-6, 1e-5), + torch.float64: (1e-7, 1e-7), + torch.complex32: (0.001, 1e-5), + torch.complex64: (1.3e-6, 1e-5), + torch.complex128: (1e-7, 1e-7), +} +# The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in +# their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values` +_DTYPE_PRECISIONS.update( + dict.fromkeys( + (torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32), + _DTYPE_PRECISIONS[torch.float32], + ) +) + + +def default_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None, +) -> Tuple[float, float]: + """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype. + + See :func:`assert_close` for a table of the default tolerance for each dtype. + + Returns: + (Tuple[float, float]): Loosest tolerances of all input dtypes. 
+ """ + dtypes = [] + for input in inputs: + if isinstance(input, torch.Tensor): + dtypes.append(input.dtype) + elif isinstance(input, torch.dtype): + dtypes.append(input) + else: + raise TypeError( + f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead." + ) + dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS + rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes]) + return max(rtols), max(atols) + + +def get_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + rtol: Optional[float], + atol: Optional[float], + id: Tuple[Any, ...] = (), +) -> Tuple[float, float]: + """Gets absolute and relative to be used for numeric comparisons. + + If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of + :func:`default_tolerances` is used. + + Raises: + ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified. + + Returns: + (Tuple[float, float]): Valid absolute and relative tolerances. + """ + if (rtol is None) ^ (atol is None): + # We require both tolerance to be omitted or specified, because specifying only one might lead to surprising + # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0. + raise ErrorMeta( + ValueError, + f"Both 'rtol' and 'atol' must be either specified or omitted, " + f"but got no {'rtol' if rtol is None else 'atol'}.", + id=id, + ) + elif rtol is not None and atol is not None: + return rtol, atol + else: + return default_tolerances(*inputs) + + +def _make_mismatch_msg( + *, + default_identifier: str, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + extra: Optional[str] = None, + abs_diff: float, + abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + atol: float, + rel_diff: float, + rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + rtol: float, +) -> str: + """Makes a mismatch error message for numeric values. 
+ + Args: + default_identifier (str): Default description of the compared values, e.g. "Tensor-likes". + identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides + ``default_identifier``. Can be passed as callable in which case it will be called with + ``default_identifier`` to create the description at runtime. + extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics. + abs_diff (float): Absolute difference. + abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference. + atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are + ``> 0``. + rel_diff (float): Relative difference. + rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference. + rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are + ``> 0``. + """ + equality = rtol == 0 and atol == 0 + + def make_diff_msg( + *, + type: str, + diff: float, + idx: Optional[Union[int, Tuple[int, ...]]], + tol: float, + ) -> str: + if idx is None: + msg = f"{type.title()} difference: {diff}" + else: + msg = f"Greatest {type} difference: {diff} at index {idx}" + if not equality: + msg += f" (up to {tol} allowed)" + return msg + "\n" + + if identifier is None: + identifier = default_identifier + elif callable(identifier): + identifier = identifier(default_identifier) + + msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n" + + if extra: + msg += f"{extra.strip()}\n" + + msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol) + msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol) + + return msg.strip() + + +def make_scalar_mismatch_msg( + actual: Union[bool, int, float, complex], + expected: Union[bool, int, float, complex], + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, 
Callable[[str], str]]] = None, +) -> str: + """Makes a mismatch error message for scalars. + + Args: + actual (Union[bool, int, float, complex]): Actual scalar. + expected (Union[bool, int, float, complex]): Expected scalar. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Scalars". + """ + abs_diff = abs(actual - expected) + rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected) + return _make_mismatch_msg( + default_identifier="Scalars", + identifier=identifier, + extra=f"Expected {expected} but got {actual}.", + abs_diff=abs_diff, + atol=atol, + rel_diff=rel_diff, + rtol=rtol, + ) + + +def make_tensor_mismatch_msg( + actual: torch.Tensor, + expected: torch.Tensor, + matches: torch.Tensor, + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, Callable[[str], str]]] = None, +): + """Makes a mismatch error message for tensors. + + Args: + actual (torch.Tensor): Actual tensor. + expected (torch.Tensor): Expected tensor. + matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the + location of matches. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Tensor-likes". 
+ """ + + def unravel_flat_index(flat_index: int) -> Tuple[int, ...]: + if not matches.shape: + return () + + inverse_index = [] + for size in matches.shape[::-1]: + div, mod = divmod(flat_index, size) + flat_index = div + inverse_index.append(mod) + + return tuple(inverse_index[::-1]) + + number_of_elements = matches.numel() + total_mismatches = number_of_elements - int(torch.sum(matches)) + extra = ( + f"Mismatched elements: {total_mismatches} / {number_of_elements} " + f"({total_mismatches / number_of_elements:.1%})" + ) + + actual_flat = actual.flatten() + expected_flat = expected.flatten() + matches_flat = matches.flatten() + + if not actual.dtype.is_floating_point and not actual.dtype.is_complex: + # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid + # overflow + actual_flat = actual_flat.to(torch.int64) + expected_flat = expected_flat.to(torch.int64) + + abs_diff = torch.abs(actual_flat - expected_flat) + # Ensure that only mismatches are used for the max_abs_diff computation + abs_diff[matches_flat] = 0 + max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0) + + rel_diff = abs_diff / torch.abs(expected_flat) + # Ensure that only mismatches are used for the max_rel_diff computation + rel_diff[matches_flat] = 0 + max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0) + return _make_mismatch_msg( + default_identifier="Tensor-likes", + identifier=identifier, + extra=extra, + abs_diff=max_abs_diff.item(), + abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)), + atol=atol, + rel_diff=max_rel_diff.item(), + rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)), + rtol=rtol, + ) + + +class UnsupportedInputs(Exception): # noqa: B903 + """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs.""" + + +class Pair(abc.ABC): + """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`. 
class Pair(abc.ABC):
    """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`.

    Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison.

    Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the
    super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to
    handle the inputs and the next pair type will be tried.

    All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can
    be used to automatically handle overwriting the message with a user supplied one and id handling.
    """

    def __init__(
        self,
        actual: Any,
        expected: Any,
        *,
        id: Tuple[Any, ...] = (),
        **unknown_parameters: Any,
    ) -> None:
        self.actual = actual
        self.expected = expected
        self.id = id
        # Options not consumed by any subclass are kept for debugging only.
        self._unknown_parameters = unknown_parameters

    @staticmethod
    def _inputs_not_supported() -> NoReturn:
        raise UnsupportedInputs

    @staticmethod
    def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]):
        """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise."""
        for input in inputs:
            if not isinstance(input, cls):
                Pair._inputs_not_supported()

    def _fail(
        self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
    ) -> NoReturn:
        """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id.

        .. warning::

            If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id``
            explicitly.
        """
        if not id and hasattr(self, "id"):
            id = self.id
        raise ErrorMeta(type, msg, id=id)

    @abc.abstractmethod
    def compare(self) -> None:
        """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch."""

    def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]:
        """Returns extra information that will be included in the representation.

        Should be overwritten by all subclasses that use additional options. The representation of the object will only
        be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of
        key-value-pairs or attribute names.
        """
        return []

    def __repr__(self) -> str:
        fields: list = [
            ("id", self.id),
            ("actual", self.actual),
            ("expected", self.expected),
        ]
        for extra in self.extra_repr():
            # extra_repr may yield attribute names or ready-made (name, value) pairs.
            fields.append((extra, getattr(self, extra)) if isinstance(extra, str) else extra)
        lines = [f"{type(self).__name__}("]
        lines.extend(f"    {name}={value!s}," for name, value in fields)
        lines.append(")")
        return "\n".join(lines)


class ObjectPair(Pair):
    """Pair for any type of inputs that will be compared with the `==` operator.

    .. note::

        Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs
        couldn't handle the inputs.

    """

    def compare(self) -> None:
        try:
            equal = self.actual == self.expected
        except Exception as error:
            # We are not using `self._raise_error_meta` here since we need the exception chaining
            raise ErrorMeta(
                ValueError,
                f"{self.actual} == {self.expected} failed with:\n{error}.",
                id=self.id,
            ) from error

        if not equal:
            self._fail(AssertionError, f"{self.actual} != {self.expected}")


class NonePair(Pair):
    """Pair for ``None`` inputs."""

    def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
        # Claim the pair as soon as either side is None; compare() then
        # requires both sides to be None.
        if actual is not None and expected is not None:
            self._inputs_not_supported()

        super().__init__(actual, expected, **other_parameters)

    def compare(self) -> None:
        if self.actual is None and self.expected is None:
            return
        self._fail(
            AssertionError, f"None mismatch: {self.actual} is not {self.expected}"
        )
+ + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...], + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, **other_parameters) + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls: List[Type] = [bool] + if NUMPY_AVAILABLE: + cls.append(np.bool_) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] + ) -> Tuple[bool, bool]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_bool(bool_like, id=id) for bool_like in (actual, expected) + ) + return actual, expected + + def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool: + if isinstance(bool_like, bool): + return bool_like + elif isinstance(bool_like, np.bool_): + return bool_like.item() + else: + raise ErrorMeta( + TypeError, f"Unknown boolean type {type(bool_like)}.", id=id + ) + + def compare(self) -> None: + if self.actual is not self.expected: + self._fail( + AssertionError, + f"Booleans mismatch: {self.actual} is not {self.expected}", + ) + + +class NumberPair(Pair): + """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs. + + .. note:: + + If ``numpy`` is available, also handles :class:`numpy.number` inputs. + + Kwargs: + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. + check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``. 
+ + The following table displays correspondence between Python number type and the ``torch.dtype``'s. See + :func:`assert_close` for the corresponding tolerances. + + +------------------+-------------------------------+ + | ``type`` | corresponding ``torch.dtype`` | + +==================+===============================+ + | :class:`int` | :attr:`~torch.int64` | + +------------------+-------------------------------+ + | :class:`float` | :attr:`~torch.float64` | + +------------------+-------------------------------+ + | :class:`complex` | :attr:`~torch.complex64` | + +------------------+-------------------------------+ + """ + + _TYPE_TO_DTYPE = { + int: torch.int64, + float: torch.float64, + complex: torch.complex128, + } + _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys()) + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] = (), + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_dtype: bool = False, + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, id=id, **other_parameters) + + self.rtol, self.atol = get_tolerances( + *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)], + rtol=rtol, + atol=atol, + id=id, + ) + self.equal_nan = equal_nan + self.check_dtype = check_dtype + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls = list(self._NUMBER_TYPES) + if NUMPY_AVAILABLE: + cls.append(np.number) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] + ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_number(number_like, id=id) for number_like in (actual, expected) + ) + return actual, expected + + def _to_number( + self, number_like: Any, *, id: Tuple[Any, ...] 
class TensorLikePair(Pair):
    """Pair for :class:`torch.Tensor`-like inputs.

    Kwargs:
        allow_subclasses (bool): If ``True`` (default), inputs of directly related types (one is an instance of the
            other's type) are allowed. If ``False``, the types have to match exactly.
        rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
            values based on the type are selected. See :func:`assert_close` for details.
        atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
            values based on the type are selected. See :func:`assert_close` for details.
        equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
        check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
            :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
            :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
        check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
            check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
            :func:`torch.promote_types`) before being compared.
        check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
            check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
            compared.
        check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
    """

    def __init__(
        self,
        actual: Any,
        expected: Any,
        *,
        id: Tuple[Any, ...] = (),
        allow_subclasses: bool = True,
        rtol: Optional[float] = None,
        atol: Optional[float] = None,
        equal_nan: bool = False,
        check_device: bool = True,
        check_dtype: bool = True,
        check_layout: bool = True,
        check_stride: bool = False,
        **other_parameters: Any,
    ):
        actual, expected = self._process_inputs(
            actual, expected, id=id, allow_subclasses=allow_subclasses
        )
        super().__init__(actual, expected, id=id, **other_parameters)

        self.rtol, self.atol = get_tolerances(
            actual, expected, rtol=rtol, atol=atol, id=self.id
        )
        self.equal_nan = equal_nan
        self.check_device = check_device
        self.check_dtype = check_dtype
        self.check_layout = check_layout
        self.check_stride = check_stride

    def _process_inputs(
        self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Validates the input types and converts both inputs to tensors."""
        # "Directly related" means one type is a (possibly improper) subclass of
        # the other, e.g. torch.nn.Parameter vs. torch.Tensor.
        directly_related = isinstance(actual, type(expected)) or isinstance(
            expected, type(actual)
        )
        if not directly_related:
            self._inputs_not_supported()

        if not allow_subclasses and type(actual) is not type(expected):
            self._inputs_not_supported()

        actual, expected = (self._to_tensor(tensor_like) for tensor_like in (actual, expected))
        for tensor in (actual, expected):
            self._check_supported(tensor, id=id)
        return actual, expected

    def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
        if isinstance(tensor_like, torch.Tensor):
            return tensor_like

        try:
            return torch.as_tensor(tensor_like)
        except Exception:
            # _inputs_not_supported raises, so falling through here never
            # returns None in practice.
            self._inputs_not_supported()

    def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None:
        """Rejects tensors with layouts the comparison logic cannot handle."""
        if tensor.layout not in {
            torch.strided,
            torch.sparse_coo,
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            raise ErrorMeta(
                ValueError, f"Unsupported tensor layout {tensor.layout}", id=id
            )

    def compare(self) -> None:
        actual, expected = self.actual, self.expected

        self._compare_attributes(actual, expected)
        # Meta tensors carry no data, so only the attribute checks apply.
        if any(t.device.type == "meta" for t in (actual, expected)):
            return

        actual, expected = self._equalize_attributes(actual, expected)
        self._compare_values(actual, expected)

    def _compare_attributes(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
    ) -> None:
        """Checks if the attributes of two tensors match.

        Always checks

        - the :attr:`~torch.Tensor.shape`,
        - whether both inputs are quantized or not,
        - and if they use the same quantization scheme.

        Checks for

        - :attr:`~torch.Tensor.layout`,
        - :meth:`~torch.Tensor.stride`,
        - :attr:`~torch.Tensor.device`, and
        - :attr:`~torch.Tensor.dtype`

        are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
        """

        def raise_mismatch_error(
            attribute_name: str, actual_value: Any, expected_value: Any
        ) -> NoReturn:
            self._fail(
                AssertionError,
                f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
            )

        if actual.shape != expected.shape:
            raise_mismatch_error("shape", actual.shape, expected.shape)

        if actual.is_quantized != expected.is_quantized:
            raise_mismatch_error(
                "is_quantized", actual.is_quantized, expected.is_quantized
            )
        elif actual.is_quantized and actual.qscheme() != expected.qscheme():
            raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())

        if actual.layout != expected.layout:
            if self.check_layout:
                raise_mismatch_error("layout", actual.layout, expected.layout)
        # Strides are only meaningful for strided tensors, hence the elif:
        # mismatching layouts already make a stride comparison moot.
        elif (
            actual.layout == torch.strided
            and self.check_stride
            and actual.stride() != expected.stride()
        ):
            raise_mismatch_error("stride()", actual.stride(), expected.stride())

        if self.check_device and actual.device != expected.device:
            raise_mismatch_error("device", actual.device, expected.device)

        if self.check_dtype and actual.dtype != expected.dtype:
            raise_mismatch_error("dtype", actual.dtype, expected.dtype)

    def _equalize_attributes(
        self, actual: torch.Tensor, expected: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Equalizes some attributes of two tensors for value comparison.

        If ``actual`` and ``expected`` are ...

        - ... not on the same :attr:`~torch.Tensor.device`, they are moved to CPU memory.
        - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
            :func:`torch.promote_types`).
        - ... not of the same ``layout``, they are converted to strided tensors.

        Args:
            actual (Tensor): Actual tensor.
            expected (Tensor): Expected tensor.

        Returns:
            (Tuple[Tensor, Tensor]): Equalized tensors.
        """
        # The comparison logic uses operators currently not supported by the MPS backends.
        # See https://github.com/pytorch/pytorch/issues/77144 for details.
        # TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
        if actual.is_mps or expected.is_mps:  # type: ignore[attr-defined]
            actual = actual.cpu()
            expected = expected.cpu()

        if actual.device != expected.device:
            actual = actual.cpu()
            expected = expected.cpu()

        if actual.dtype != expected.dtype:
            actual_dtype = actual.dtype
            expected_dtype = expected.dtype
            # For uint64, this is not sound in general, which is why promote_types doesn't
            # allow it, but for easy testing, we're unlikely to get confused
            # by large uint64 overflowing into negative int64
            if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]:
                actual_dtype = torch.int64
            if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]:
                expected_dtype = torch.int64
            dtype = torch.promote_types(actual_dtype, expected_dtype)
            actual = actual.to(dtype)
            expected = expected.to(dtype)

        if actual.layout != expected.layout:
            # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
            actual = actual.to_dense() if actual.layout != torch.strided else actual
            expected = (
                expected.to_dense() if expected.layout != torch.strided else expected
            )

        return actual, expected

    def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
        """Dispatches the value comparison based on the tensors' layout/quantization."""
        if actual.is_quantized:
            compare_fn = self._compare_quantized_values
        elif actual.is_sparse:
            compare_fn = self._compare_sparse_coo_values
        elif actual.layout in {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            compare_fn = self._compare_sparse_compressed_values
        else:
            compare_fn = self._compare_regular_values_close

        compare_fn(
            actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan
        )

    def _compare_quantized_values(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        rtol: float,
        atol: float,
        equal_nan: bool,
    ) -> None:
        """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.

        .. note::

            A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
            the individual quantization parameters for closeness and the integer representation for equality can be
            found in https://github.com/pytorch/pytorch/issues/68548.
        """
        return self._compare_regular_values_close(
            actual.dequantize(),
            expected.dequantize(),
            rtol=rtol,
            atol=atol,
            equal_nan=equal_nan,
            identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
        )

    def _compare_sparse_coo_values(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        rtol: float,
        atol: float,
        equal_nan: bool,
    ) -> None:
        """Compares sparse COO tensors by comparing

        - the number of sparse dimensions,
        - the number of non-zero elements (nnz) for equality,
        - the indices for equality, and
        - the values for closeness.
        """
        if actual.sparse_dim() != expected.sparse_dim():
            self._fail(
                AssertionError,
                (
                    f"The number of sparse dimensions in sparse COO tensors does not match: "
                    f"{actual.sparse_dim()} != {expected.sparse_dim()}"
                ),
            )

        if actual._nnz() != expected._nnz():
            self._fail(
                AssertionError,
                (
                    f"The number of specified values in sparse COO tensors does not match: "
                    f"{actual._nnz()} != {expected._nnz()}"
                ),
            )

        self._compare_regular_values_equal(
            actual._indices(),
            expected._indices(),
            identifier="Sparse COO indices",
        )
        self._compare_regular_values_close(
            actual._values(),
            expected._values(),
            rtol=rtol,
            atol=atol,
            equal_nan=equal_nan,
            identifier="Sparse COO values",
        )

    def _compare_sparse_compressed_values(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        rtol: float,
        atol: float,
        equal_nan: bool,
    ) -> None:
        """Compares sparse compressed tensors by comparing

        - the number of non-zero elements (nnz) for equality,
        - the plain indices for equality,
        - the compressed indices for equality, and
        - the values for closeness.
        """
        format_name, compressed_indices_method, plain_indices_method = {
            torch.sparse_csr: (
                "CSR",
                torch.Tensor.crow_indices,
                torch.Tensor.col_indices,
            ),
            torch.sparse_csc: (
                "CSC",
                torch.Tensor.ccol_indices,
                torch.Tensor.row_indices,
            ),
            torch.sparse_bsr: (
                "BSR",
                torch.Tensor.crow_indices,
                torch.Tensor.col_indices,
            ),
            torch.sparse_bsc: (
                "BSC",
                torch.Tensor.ccol_indices,
                torch.Tensor.row_indices,
            ),
        }[actual.layout]

        if actual._nnz() != expected._nnz():
            self._fail(
                AssertionError,
                (
                    f"The number of specified values in sparse {format_name} tensors does not match: "
                    f"{actual._nnz()} != {expected._nnz()}"
                ),
            )

        # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_
        # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it
        # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will
        # fail.
        actual_compressed_indices = compressed_indices_method(actual)
        expected_compressed_indices = compressed_indices_method(expected)
        indices_dtype = torch.promote_types(
            actual_compressed_indices.dtype, expected_compressed_indices.dtype
        )

        self._compare_regular_values_equal(
            actual_compressed_indices.to(indices_dtype),
            expected_compressed_indices.to(indices_dtype),
            identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
        )
        self._compare_regular_values_equal(
            plain_indices_method(actual).to(indices_dtype),
            plain_indices_method(expected).to(indices_dtype),
            identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
        )
        self._compare_regular_values_close(
            actual.values(),
            expected.values(),
            rtol=rtol,
            atol=atol,
            equal_nan=equal_nan,
            identifier=f"Sparse {format_name} values",
        )

    def _compare_regular_values_equal(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        equal_nan: bool = False,
        identifier: Optional[Union[str, Callable[[str], str]]] = None,
    ) -> None:
        """Checks if the values of two tensors are equal."""
        # Equality is closeness with zero tolerances.
        self._compare_regular_values_close(
            actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier
        )

    def _compare_regular_values_close(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        rtol: float,
        atol: float,
        equal_nan: bool,
        identifier: Optional[Union[str, Callable[[str], str]]] = None,
    ) -> None:
        """Checks if the values of two tensors are close up to a desired tolerance."""
        matches = torch.isclose(
            actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan
        )
        if torch.all(matches):
            return

        # 0-dim tensors get the scalar-style message for readability.
        if actual.shape == torch.Size([]):
            msg = make_scalar_mismatch_msg(
                actual.item(),
                expected.item(),
                rtol=rtol,
                atol=atol,
                identifier=identifier,
            )
        else:
            msg = make_tensor_mismatch_msg(
                actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier
            )
        self._fail(AssertionError, msg)

    def extra_repr(self) -> Sequence[str]:
        return (
            "rtol",
            "atol",
            "equal_nan",
            "check_device",
            "check_dtype",
            "check_layout",
            "check_stride",
        )
def originate_pairs(
    actual: Any,
    expected: Any,
    *,
    pair_types: Sequence[Type[Pair]],
    sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
    mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
    id: Tuple[Any, ...] = (),
    **options: Any,
) -> List[Pair]:
    """Builds the list of comparison pairs for two inputs.

    ``actual`` and ``expected`` may be (possibly nested) sequences or mappings,
    in which case the pairs are collected by recursing elementwise. Otherwise
    the given ``pair_types`` are tried in order and the first one that accepts
    the inputs produces the (single) pair.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        pair_types (Sequence[Type[Pair]]): Pair types tried in order; the first
            successful construction wins.
        sequence_types (Tuple[Type, ...]): Types handled elementwise as sequences.
        mapping_types (Tuple[Type, ...]): Types handled elementwise as mappings.
        id (Tuple[Any, ...]): Path of indices/keys leading to these inputs,
            included in error messages.
        **options (Any): Forwarded to every pair constructor.

    Raises:
        ErrorMeta: With :class:`AssertionError` for sequence length or mapping
            key mismatches, with :class:`TypeError` if no pair type accepts the
            inputs, or with any exception a pair raises during construction.

    Returns:
        (List[Pair]): Collected pairs.
    """
    # str is a sequence of itself ("a" == "a"[0][0]...), so recursing into it
    # would never terminate. Strings are therefore routed to the pair types.
    both_sequences = (
        isinstance(actual, sequence_types)
        and isinstance(expected, sequence_types)
        and not isinstance(actual, str)
        and not isinstance(expected, str)
    )

    if both_sequences:
        num_actual = len(actual)
        num_expected = len(expected)
        if num_actual != num_expected:
            raise ErrorMeta(
                AssertionError,
                f"The length of the sequences mismatch: {num_actual} != {num_expected}",
                id=id,
            )

        # Flatten the elementwise recursion into one list of pairs.
        return [
            pair
            for index in range(num_actual)
            for pair in originate_pairs(
                actual[index],
                expected[index],
                pair_types=pair_types,
                sequence_types=sequence_types,
                mapping_types=mapping_types,
                id=(*id, index),
                **options,
            )
        ]

    if isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
        actual_keys = set(actual.keys())
        expected_keys = set(expected.keys())
        if actual_keys != expected_keys:
            missing_keys = expected_keys - actual_keys
            additional_keys = actual_keys - expected_keys
            raise ErrorMeta(
                AssertionError,
                (
                    f"The keys of the mappings do not match:\n"
                    f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
                    f"Additional keys in the actual mapping: {sorted(additional_keys)}"
                ),
                id=id,
            )

        keys: Collection = actual_keys
        # Origination aborts at the first failure, so sort the keys when
        # possible to keep the traversal order deterministic.
        with contextlib.suppress(Exception):
            keys = sorted(keys)

        return [
            pair
            for key in keys
            for pair in originate_pairs(
                actual[key],
                expected[key],
                pair_types=pair_types,
                sequence_types=sequence_types,
                mapping_types=mapping_types,
                id=(*id, key),
                **options,
            )
        ]

    for pair_type in pair_types:
        try:
            return [pair_type(actual, expected, id=id, **options)]
        # `UnsupportedInputs` means this pair type cannot handle the inputs;
        # move on to the next candidate.
        except UnsupportedInputs:
            continue
        # `ErrorMeta` is the orderly way a pair aborts origination, so it is
        # re-raised untouched. A dedicated branch is needed because the broad
        # handler below would otherwise swallow it.
        except ErrorMeta:
            raise
        # Anything else is a bug in a pair type; surface it with full context.
        except Exception as error:
            raise RuntimeError(
                f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
                f"{type(actual).__name__}(): {actual}\n\n"
                f"and\n\n"
                f"{type(expected).__name__}(): {expected}\n\n"
                f"resulted in the unexpected exception above. "
                f"If you are a user and see this message during normal operation "
                "please file an issue at https://github.com/pytorch/pytorch/issues. "
                "If you are a developer and working on the comparison functions, "
                "please except the previous error and raise an expressive `ErrorMeta` instead."
            ) from error

    # Every candidate declined the inputs.
    raise ErrorMeta(
        TypeError,
        f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
        id=id,
    )
def not_close_error_metas(
    actual: Any,
    expected: Any,
    *,
    pair_types: Sequence[Type[Pair]] = (ObjectPair,),
    sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
    mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
    **options: Any,
) -> List[ErrorMeta]:
    """Compares the inputs and collects an :class:`ErrorMeta` for every mismatch.

    ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
    :class:`~collections.abc.Mapping`'s, in which case the comparison recurses elementwise.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        pair_types (Sequence[Type[Pair]]): :class:`Pair` types tried in order during origination; the first
            successful one is used. Defaults to only using :class:`ObjectPair`.
        sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
        mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
        **options (Any): Options passed to each pair during construction.
    """
    # Hide this function from `pytest`'s traceback
    __tracebackhide__ = True

    try:
        comparison_pairs = originate_pairs(
            actual,
            expected,
            pair_types=pair_types,
            sequence_types=sequence_types,
            mapping_types=mapping_types,
            **options,
        )
    except ErrorMeta as error_meta:
        # Explicitly raising from None to hide the internal traceback
        raise error_meta.to_error() from None  # noqa: RSE102

    error_metas: List[ErrorMeta] = []
    for pair in comparison_pairs:
        try:
            pair.compare()
        except ErrorMeta as error_meta:
            error_metas.append(error_meta)
        # Any exception besides `ErrorMeta` is a bug in a pair's compare();
        # surface it with enough context to debug or report.
        except Exception as error:
            raise RuntimeError(
                f"Comparing\n\n"
                f"{pair}\n\n"
                f"resulted in the unexpected exception above. "
                f"If you are a user and see this message during normal operation "
                "please file an issue at https://github.com/pytorch/pytorch/issues. "
                "If you are a developer and working on the comparison functions, "
                "please except the previous error and raise an expressive `ErrorMeta` instead."
            ) from error

    # [ErrorMeta Cycles]
    # The collected ErrorMeta objects capture tracebacks that reference this
    # frame, while the local `error_metas` references the objects — a reference
    # cycle. The frames in those tracebacks would only be freed by cycle
    # collection, which leaks e.g. CUDA memory in tests. Boxing the list and
    # popping it drops this frame's reference as the function returns.
    error_metas = [error_metas]
    return error_metas.pop()
def assert_close(
    actual: Any,
    expected: Any,
    *,
    allow_subclasses: bool = True,
    rtol: Optional[float] = None,
    atol: Optional[float] = None,
    equal_nan: bool = False,
    check_device: bool = True,
    check_dtype: bool = True,
    check_layout: bool = True,
    check_stride: bool = False,
    msg: Optional[Union[str, Callable[[str], str]]] = None,
):
    r"""Asserts that ``actual`` and ``expected`` are close.

    If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if

    .. math::

        \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert

    Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are
    only considered equal to each other if ``equal_nan`` is ``True``.

    In addition, they are only considered close if they have the same

    - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
    - ``dtype`` (if ``check_dtype`` is ``True``),
    - ``layout`` (if ``check_layout`` is ``True``), and
    - stride (if ``check_stride`` is ``True``).

    If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.

    If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
    checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
    or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
    are always checked for equality whereas the values are checked for closeness according to the definition above.

    If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
    :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
    definition above.

    ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
    :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
    have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
    or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
    their elements are considered close according to the above definition.

    .. note::

        Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
        :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
        Python scalars of different types can be checked, but require ``check_dtype=False``.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
            are allowed. Otherwise type equality is required.
        rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
            values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
        atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
            values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
        equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal.
        check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
            :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
            :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
        check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
            check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
            :func:`torch.promote_types`) before being compared.
        check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
            check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
            compared.
        check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
        msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
            the comparison. Can also passed as callable in which case it will be called with the generated message and
            should return the new message.

    Raises:
        ValueError: If no :class:`torch.Tensor` can be constructed from an input.
        ValueError: If only ``rtol`` or ``atol`` is specified.
        AssertionError: If corresponding inputs are not Python scalars and are not directly related.
        AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
            different types.
        AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match.
        AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match.
        AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
        AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
            :attr:`~torch.Tensor.layout`.
        AssertionError: If only one of corresponding tensors is quantized.
        AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
        AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
            :attr:`~torch.Tensor.device`.
        AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
        AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
        AssertionError: If the values of corresponding tensors are not close according to the definition above.

    The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
    ``dtype``'s, the maximum of both tolerances is used.

    +---------------------------+------------+----------+
    | ``dtype``                 | ``rtol``   | ``atol`` |
    +===========================+============+==========+
    | :attr:`~torch.float16`    | ``1e-3``   | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.bfloat16`   | ``1.6e-2`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.float32`    | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.float64`    | ``1e-7``   | ``1e-7`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex32`  | ``1e-3``   | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex64`  | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex128` | ``1e-7``   | ``1e-7`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint8`     | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint2x4`   | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint4x2`   | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.qint8`      | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.qint32`     | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | other                     | ``0.0``    | ``0.0``  |
    +---------------------------+------------+----------+

    .. note::

        :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
        to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
        define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:

        >>> import functools
        >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
        >>> assert_equal(1e-9, 1e-10)
        Traceback (most recent call last):
        ...
        AssertionError: Scalars are not equal!
        <BLANKLINE>
        Expected 1e-10 but got 1e-09.
        Absolute difference: 9.000000000000001e-10
        Relative difference: 9.0

    Examples:
        >>> # tensor to tensor comparison
        >>> expected = torch.tensor([1e0, 1e-1, 1e-2])
        >>> actual = torch.acos(torch.cos(expected))
        >>> torch.testing.assert_close(actual, expected)

        >>> # scalar to scalar comparison
        >>> import math
        >>> expected = math.sqrt(2.0)
        >>> actual = 2.0 / math.sqrt(2.0)
        >>> torch.testing.assert_close(actual, expected)

        >>> # numpy array to numpy array comparison
        >>> import numpy as np
        >>> expected = np.array([1e0, 1e-1, 1e-2])
        >>> actual = np.arccos(np.cos(expected))
        >>> torch.testing.assert_close(actual, expected)

        >>> # sequence to sequence comparison
        >>> import numpy as np
        >>> # The types of the sequences do not have to match. They only have to have the same
        >>> # length and their elements have to match.
        >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
        >>> actual = tuple(expected)
        >>> torch.testing.assert_close(actual, expected)

        >>> # mapping to mapping comparison
        >>> from collections import OrderedDict
        >>> import numpy as np
        >>> foo = torch.tensor(1.0)
        >>> bar = 2.0
        >>> baz = np.array(3.0)
        >>> # The types and a possible ordering of mappings do not have to match. They only
        >>> # have to have the same set of keys and their elements have to match.
        >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
        >>> actual = {"baz": baz, "bar": bar, "foo": foo}
        >>> torch.testing.assert_close(actual, expected)

        >>> expected = torch.tensor([1.0, 2.0, 3.0])
        >>> actual = expected.clone()
        >>> # By default, directly related instances can be compared
        >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
        >>> # This check can be made more strict with allow_subclasses=False
        >>> torch.testing.assert_close(
        ...     torch.nn.Parameter(actual), expected, allow_subclasses=False
        ... )
        Traceback (most recent call last):
        ...
        TypeError: No comparison pair was able to handle inputs of type
        <class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
        >>> # If the inputs are not directly related, they are never considered close
        >>> torch.testing.assert_close(actual.numpy(), expected)
        Traceback (most recent call last):
        ...
        TypeError: No comparison pair was able to handle inputs of type
        <class 'numpy.ndarray'> and <class 'torch.Tensor'>.
        >>> # Exceptions to these rules are Python scalars. They can be checked regardless of
        >>> # their type if check_dtype=False.
        >>> torch.testing.assert_close(1.0, 1, check_dtype=False)

        >>> # NaN != NaN by default.
        >>> expected = torch.tensor(float("Nan"))
        >>> actual = expected.clone()
        >>> torch.testing.assert_close(actual, expected)
        Traceback (most recent call last):
        ...
        AssertionError: Scalars are not close!
        <BLANKLINE>
        Expected nan but got nan.
        Absolute difference: nan (up to 1e-05 allowed)
        Relative difference: nan (up to 1.3e-06 allowed)
        >>> torch.testing.assert_close(actual, expected, equal_nan=True)

        >>> expected = torch.tensor([1.0, 2.0, 3.0])
        >>> actual = torch.tensor([1.0, 4.0, 5.0])
        >>> # The default error message can be overwritten.
        >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!")
        Traceback (most recent call last):
        ...
        AssertionError: Argh, the tensors are not close!
        >>> # If msg is a callable, it can be used to augment the generated message with
        >>> # extra information
        >>> torch.testing.assert_close(
        ...     actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
        ... )
        Traceback (most recent call last):
        ...
        AssertionError: Header
        <BLANKLINE>
        Tensor-likes are not close!
        <BLANKLINE>
        Mismatched elements: 2 / 3 (66.7%)
        Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
        Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
        <BLANKLINE>
        Footer
    """
    # Hide this function from `pytest`'s traceback
    __tracebackhide__ = True

    error_metas = not_close_error_metas(
        actual,
        expected,
        pair_types=(
            NonePair,
            BooleanPair,
            NumberPair,
            TensorLikePair,
        ),
        allow_subclasses=allow_subclasses,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        check_device=check_device,
        check_dtype=check_dtype,
        check_layout=check_layout,
        check_stride=check_stride,
        msg=msg,
    )

    if error_metas:
        # TODO: compose all metas into one AssertionError
        raise error_metas[0].to_error(msg)
+ >>> # If msg is a callable, it can be used to augment the generated message with + >>> # extra information + >>> torch.testing.assert_close( + ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter" + ... ) + Traceback (most recent call last): + ... + AssertionError: Header + + Tensor-likes are not close! + + Mismatched elements: 2 / 3 (66.7%) + Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed) + Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed) + + Footer + """ + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + error_metas = not_close_error_metas( + actual, + expected, + pair_types=( + NonePair, + BooleanPair, + NumberPair, + TensorLikePair, + ), + allow_subclasses=allow_subclasses, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=check_device, + check_dtype=check_dtype, + check_layout=check_layout, + check_stride=check_stride, + msg=msg, + ) + + if error_metas: + # TODO: compose all metas into one AssertionError + raise error_metas[0].to_error(msg) + + +@deprecated( + "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. " + "Please use `torch.testing.assert_close()` instead. " + "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.", + category=FutureWarning, +) +def assert_allclose( + actual: Any, + expected: Any, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = True, + msg: str = "", +) -> None: + """ + .. warning:: + + :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release. + Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions + `here `_. 
+ """ + if not isinstance(actual, torch.Tensor): + actual = torch.tensor(actual) + if not isinstance(expected, torch.Tensor): + expected = torch.tensor(expected, dtype=actual.dtype) + + if rtol is None and atol is None: + rtol, atol = default_tolerances( + actual, + expected, + dtype_precisions={ + torch.float16: (1e-3, 1e-3), + torch.float32: (1e-4, 1e-5), + torch.float64: (1e-5, 1e-8), + }, + ) + + torch.testing.assert_close( + actual, + expected, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=True, + check_dtype=False, + check_stride=False, + msg=msg or None, + ) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_creation.py b/valley/lib/python3.10/site-packages/torch/testing/_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..d8fb2ef18b1d506b87d942080edb8736c6f6e543 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_creation.py @@ -0,0 +1,268 @@ +""" +This module contains tensor creation utilities. +""" + +import collections.abc +import math +import warnings +from typing import cast, List, Optional, Tuple, Union + +import torch + +_INTEGRAL_TYPES = [ + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint16, + torch.uint32, + torch.uint64, +] +_FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64] +_FLOATING_8BIT_TYPES = [ + torch.float8_e4m3fn, + torch.float8_e5m2, + torch.float8_e4m3fnuz, + torch.float8_e5m2fnuz, +] +_COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128] +_BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES] +_FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES] + + +def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor: + # uniform_ requires to-from <= std::numeric_limits::max() + # Work around this by scaling the range before and after the PRNG + if high - low >= torch.finfo(t.dtype).max: + return t.uniform_(low / 2, high / 2).mul_(2) + else: + return 
t.uniform_(low, high) + + +def make_tensor( + *shape: Union[int, torch.Size, List[int], Tuple[int, ...]], + dtype: torch.dtype, + device: Union[str, torch.device], + low: Optional[float] = None, + high: Optional[float] = None, + requires_grad: bool = False, + noncontiguous: bool = False, + exclude_zero: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> torch.Tensor: + r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with + values uniformly drawn from ``[low, high)``. + + If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable + finite values then they are clamped to the lowest or highest representable finite value, respectively. + If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`, + which depend on :attr:`dtype`. + + +---------------------------+------------+----------+ + | ``dtype`` | ``low`` | ``high`` | + +===========================+============+==========+ + | boolean type | ``0`` | ``2`` | + +---------------------------+------------+----------+ + | unsigned integral type | ``0`` | ``10`` | + +---------------------------+------------+----------+ + | signed integral types | ``-9`` | ``10`` | + +---------------------------+------------+----------+ + | floating types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + | complex types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + + Args: + shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor. + dtype (:class:`torch.dtype`): The data type of the returned tensor. + device (Union[str, torch.device]): The device of the returned tensor. + low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is + clamped to the least representable finite value of the given dtype. 
When ``None`` (default), + this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is + clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value + is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + + .. deprecated:: 2.1 + + Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated + since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead. + + requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``. + noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is + ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``. + exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value + depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating + point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the + :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number + whose real and imaginary parts are both the smallest positive normal number representable by the complex + type. Default ``False``. + memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive + with ``noncontiguous``. + + Raises: + ValueError: If ``requires_grad=True`` is passed for integral `dtype` + ValueError: If ``low >= high``. + ValueError: If either :attr:`low` or :attr:`high` is ``nan``. + ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed. + TypeError: If :attr:`dtype` isn't supported by this function. 
+ + Examples: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> from torch.testing import make_tensor + >>> # Creates a float tensor with values in [-1, 1) + >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1) + >>> # xdoctest: +SKIP + tensor([ 0.1205, 0.2282, -0.6380]) + >>> # Creates a bool tensor on CUDA + >>> make_tensor((2, 2), device='cuda', dtype=torch.bool) + tensor([[False, False], + [False, True]], device='cuda:0') + """ + + def modify_low_high( + low: Optional[float], + high: Optional[float], + *, + lowest_inclusive: float, + highest_exclusive: float, + default_low: float, + default_high: float, + ) -> Tuple[float, float]: + """ + Modifies (and raises ValueError when appropriate) low and high values given by the user (input_low, input_high) + if required. + """ + + def clamp(a: float, l: float, h: float) -> float: + return min(max(a, l), h) + + low = low if low is not None else default_low + high = high if high is not None else default_high + + if any(isinstance(value, float) and math.isnan(value) for value in [low, high]): + raise ValueError( + f"`low` and `high` cannot be NaN, but got {low=} and {high=}" + ) + elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES: + warnings.warn( + "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types " + "is deprecated since 2.1 and will be removed in 2.3. " + "Use `torch.full(...)` instead.", + FutureWarning, + stacklevel=3, + ) + elif low >= high: + raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}") + elif high < lowest_inclusive or low >= highest_exclusive: + raise ValueError( + f"The value interval specified by `low` and `high` is [{low}, {high}), " + f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})" + ) + + low = clamp(low, lowest_inclusive, highest_exclusive) + high = clamp(high, lowest_inclusive, highest_exclusive) + + if dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + # 1. 
`low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval + # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of + # `torch.randint` is exclusive, and thus we need to ceil here as well. + return math.ceil(low), math.ceil(high) + + return low, high + + if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence): + shape = shape[0] # type: ignore[assignment] + shape = cast(Tuple[int, ...], tuple(shape)) + + if noncontiguous and memory_format is not None: + raise ValueError( + f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, " + f"but got {noncontiguous=} and {memory_format=}" + ) + + if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + raise ValueError( + f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}" + ) + + if dtype is torch.bool: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=0, + highest_exclusive=2, + default_low=0, + default_high=2, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=torch.iinfo(dtype).min, + highest_exclusive=torch.iinfo(dtype).max + # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint` + # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot + # sample 2**63 - 1, i.e. the maximum value of `torch.int64` and we need to account for that here. + + (1 if dtype is not torch.int64 else 0), + # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 
0 for `torch.uint8`, + # _after_ we use the default value, we don't need to special case it here + default_low=-9, + default_high=10, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _FLOATING_OR_COMPLEX_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=dtype) + _uniform_random_( + torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high + ) + elif dtype in _FLOATING_8BIT_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=torch.float32) + _uniform_random_(result, low, high) + result = result.to(dtype) + else: + raise TypeError( + f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()." 
+ " To request support, file an issue at: https://github.com/pytorch/pytorch/issues" + ) + + if noncontiguous and result.numel() > 1: + result = torch.repeat_interleave(result, 2, dim=-1) + result = result[..., ::2] + elif memory_format is not None: + result = result.clone(memory_format=memory_format) + + if exclude_zero: + result[result == 0] = ( + 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny + ) + + if dtype in _FLOATING_OR_COMPLEX_TYPES: + result.requires_grad = requires_grad + + return result diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/__init__.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py new file mode 100644 index 0000000000000000000000000000000000000000..8527084f4afa8206c20616ece74f282d1e1b2092 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py @@ -0,0 +1,370 @@ +# mypy: ignore-errors + +import torch +from torch.testing._internal.common_utils import TEST_WITH_ROCM + + +class AutocastTestLists: + def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype): + input = (torch.randn((n, n), device=dev, dtype=torch.float32),) + + hx = ((torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else + torch.randn((n, n), device=dev, dtype=torch.float32),) + + weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih + torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh + torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih + torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh + + # 
returns args as a tuple + return input + hx + weights + + # Supplies ops and arguments for test_autocast_* in test/test_cuda.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py. + # Each op is associated with a tuple of valid arguments. + # In addition, cudnn conv ops are not supported on ROCm and hence will + # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. 
+ self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ] + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + # The remaining lists organize ops that autocast treats explicitly. 
+ self.torch_fp16 = [ + # deprecated _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True)), + # the current _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True, True)), + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("conv_tbc", conv_args_fp32[0] + bias_fp32), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)), + ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), + (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("prelu", pointwise0_fp32 + element0_fp32), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32), + ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32), + ("mm", mat0_fp32 + mat1_fp32), + ("mv", mat0_fp32 + pointwise0_fp32), + ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell. 
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)), + ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ] + self.torch_fp32 = [ + ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("cosh", pointwise0_fp16), + ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)), + ("exp", pointwise0_fp16), + ("expm1", pointwise0_fp16), + ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)), + ("reciprocal", pointwise0_fp16), + ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)), + ("sinh", pointwise0_fp16), + ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)), + # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API. 
+ ("softmax", pointwise0_fp16 + (0,)), + ("log_softmax", pointwise0_fp16 + (0,)), + ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)), + ("group_norm", mat0_fp16 + (1,)), + ("norm", pointwise0_fp16), + ("norm", pointwise0_fp16, {"dim": 0}), + # these need magma + # ("norm", mat0_fp16, {"p": "nuc"}), + # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}), + ("norm", pointwise0_fp16, {"p": 1}), + ("norm", pointwise0_fp16, {"p": 1, "dim": 0}), + ("cosine_similarity", mat0_fp16 + mat1_fp16), + ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)), + ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16), + ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("cumprod", pointwise0_fp16 + (0,)), + ("cumsum", pointwise0_fp16 + (0,)), + ("dist", pointwise0_fp16 + pointwise1_fp16), + ("pdist", mat0_fp16), + ("cdist", mat0_fp16 + mat1_fp16), + ("prod", pointwise0_fp16), + ("prod", pointwise0_fp16 + (0,)), + ("renorm", mat0_fp16 + (2, 0, 1.0)), + ("sum", pointwise0_fp16), + ("sum", mat0_fp16 + (1,)), + ("logsumexp", mat0_fp16 + (1,)), + ] + self.torch_need_autocast_promote = [ + ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)), + ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16), + ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)), + ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev), + torch.randn((1, 
2), dtype=torch.float32, device=dev), + torch.randn((1, 2, 2), dtype=torch.float16, device=dev), + torch.randn((1,), dtype=torch.float32, device=dev))), + ("cross", (torch.randn(3, dtype=torch.float32, device=dev), + torch.randn(3, dtype=torch.float16, device=dev))), + ("dot", pointwise0_fp16 + pointwise1_fp32), + ("vdot", pointwise0_fp16 + pointwise1_fp32), + ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev), + torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev), + 0, 0, False)), + ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float16))), + ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float32))), + ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float32, device=dev))), + ] + self.nn_fp16 = [ + ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32), + ] + self.nn_fp32 = [ + ("softplus", pointwise0_fp16), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_fp16 + mat1_fp16), + ("smooth_l1_loss", mat0_fp16 + mat1_fp16), + ("mse_loss", mat0_fp16 + mat1_fp16), + ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + 
("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ] + self.linalg_fp16 = [ + ("linalg_vecdot", mat0_fp32 + mat0_fp32), + ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)), + ] + self.methods_fp16 = [ + ("__matmul__", mat0_fp32 + mat1_fp32) + ] + self.methods_fp32 = [ + ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)), + ] + self.banned = [ + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32), + torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn), + ] + +class AutocastCPUTestLists: + # Supplies ops and arguments for test_autocast_* in test/test_cpu.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + + dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) + + dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) + for dimset in dummy_dimsets] + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev), + torch.randn(dimset, dtype=torch.bfloat16, device=dev)) + for dimset in dimsets] + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = 
(torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),) + for dimset in dummy_dimsets] + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. + # Each op is associated with a tuple of valid arguments. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. + self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + 
pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + # The remaining lists organize ops that autocast treats explicitly. + self.torch_16 = [ + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("mm", mat0_fp32 + mat1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32), + torch.randn((5, 3, 5), device=dev, dtype=torch.float32), + torch.randn(5, device=dev, dtype=torch.float32), + 0)), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("prelu", pointwise0_fp32 + element0_fp32), + ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + 
torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32), + torch.randn((3 * n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n), device=dev, dtype=torch.float32))), + ] + self.torch_fp32 = [ + ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)), + ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16), + ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ] + self.nn_16 = [ + ("linear", mat0_fp32 + mat1_fp32, {}), + ] + self.nn_fp32 = [ + ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_bf16 + mat1_bf16), + ("smooth_l1_loss", mat0_bf16 + mat1_bf16), + ("mse_loss", mat0_bf16 + mat1_bf16), + ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + 
("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ("huber_loss", mat0_bf16 + mat1_bf16), + ] + self.torch_need_autocast_promote = [ + ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ] diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py new file mode 100644 index 0000000000000000000000000000000000000000..e092c4d9339b7c804328fe39826710f89354e49d --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py @@ -0,0 +1,635 @@ +# mypy: ignore-errors + +import torch +from functools import partial +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np + +# Note: [autograd.Function db] +# +# This is a collection of autograd.Function test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if a subsystem +# supports autograd.Function. 
# Axes:
# - saves {output, input, intermediate, non-tensor}
# - {inputs, output} x {single tensor, tensors, arbitrary objects}
# - Uses {mark_dirty, mark_non_differentiable, once_differentiable}


def to_numpy(tensor):
    """Copy `tensor` to CPU and view it as a numpy array (makes ops opaque to autograd)."""
    return tensor.cpu().numpy()


class NumpyCube(torch.autograd.Function):
    """Returns (x**3, 3*x**2), computed through numpy so autograd can't trace it."""

    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        dinput = torch.tensor(3 * input_np ** 2, device=input.device)
        return torch.tensor(input_np ** 3, device=input.device), dinput

    @staticmethod
    def setup_context(ctx, inputs, output):
        # Save x and 3x**2 for both backward (save_for_backward) and jvp (save_for_forward).
        ctx.save_for_backward(inputs[0], output[1])
        ctx.save_for_forward(inputs[0], output[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # vjp: g0 * d(x^3)/dx + g1 * d(3x^2)/dx = g0 * 3x^2 + g1 * 6x
        return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)

    @staticmethod
    def vmap(info, in_dims, input):
        # Elementwise op: apply directly; both outputs share the input's batch dim.
        result = NumpyCube.apply(input)
        return result, (in_dims[0], in_dims[0])

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)


class CubeGenVmap(torch.autograd.Function):
    """Same math as NumpyCube, but relying on the auto-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x):
        return x ** 3, 3 * x ** 2

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(inputs[0], outputs[1])
        ctx.save_for_forward(inputs[0], outputs[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # BUGFIX: the second output is 3x^2, so its vjp contribution is
        # grad_saved * 6x. The previous expression (`+ 6 * dinput`) dropped
        # grad_saved entirely and used 18x^2, disagreeing with both
        # NumpyCube.backward and this class's own jvp; gradcheck fails on it.
        result = grad_output * dinput + 6 * grad_saved * input
        return result

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        # NOTE(review): second term uses NumpyMul rather than MulGenVmap —
        # mathematically equivalent; left as-is.
        return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)


def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
    """One scalar-shaped sample, bounded away from 0 for stable gradcheck."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(1, low=0.8, high=2), args=())


class NumpyCubeNotComposable(torch.autograd.Function):
    """Like NumpyCube, but stashes raw numpy data on ctx — not composable with transforms."""

    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        return torch.tensor(input_np ** 3, device=input.device), input_np

    @staticmethod
    def setup_context(ctx, inputs, output):
        _, input_np = output
        ctx.input_np = input_np
        ctx.device = inputs[0].device

    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_output, grad_saved):
        result_np = 3 * (ctx.input_np ** 2)
        return torch.tensor(result_np, device=ctx.device)


class NumpyMul(torch.autograd.Function):
    """Elementwise multiply routed through numpy."""

    @staticmethod
    def forward(x, y):
        return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        gx = None
        if ctx.needs_input_grad[0]:
            gx = NumpyMul.apply(grad_output, y)
        gy = None
        if ctx.needs_input_grad[1]:
            gy = NumpyMul.apply(grad_output, x)
        return gx, gy

    @staticmethod
    def vmap(info, in_dims, x, y):
        # Move batch dims to the last position so broadcasting lines up,
        # then move the result's batch dim back to the front.
        x_bdim, y_bdim = in_dims
        x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
        y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
        result = NumpyMul.apply(x, y)
        result = result.movedim(-1, 0)
        return result, 0

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x


def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Broadcasting
    yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))


def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14})


class MulGenVmap(torch.autograd.Function):
    """Elementwise multiply with the auto-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x * y

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        gx = None
        if ctx.needs_input_grad[0]:
            gx = MulGenVmap.apply(grad_output, y)
        gy = None
        if ctx.needs_input_grad[1]:
            gy = MulGenVmap.apply(grad_output, x)
        return gx, gy

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x


class NumpyExp_(torch.autograd.Function):
    """In-place exp via numpy; exercises mark_dirty."""

    @staticmethod
    def forward(x):
        x_np = to_numpy(x)
        np.exp(x_np, x_np)  # in-place: mutates the tensor's storage through numpy
        return x

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, = inputs
        ctx.mark_dirty(x)
        ctx.save_for_backward(output)
        ctx.save_for_forward(output)

    @staticmethod
    def backward(ctx, grad_output):
        output, = ctx.saved_tensors
        return NumpyMul.apply(grad_output, output)

    @staticmethod
    def vmap(info, in_dims, x):
        NumpyExp_.apply(x)
        return x, in_dims[0]

    @staticmethod
    def jvp(ctx, x_tangent):
        # Doesn't call numpy operations because I didn't want to write NumpyMul_
        output, = ctx.saved_tensors
        x_tangent.mul_(output)
        return x_tangent


class NumpySort(torch.autograd.Function):
    """Sort along `dim` via numpy; also returns (non-differentiable) sort indices."""

    @staticmethod
    def forward(x, dim):
        device = x.device
        x = to_numpy(x)
        ind = np.argsort(x, axis=dim)
        ind_inv = np.argsort(ind, axis=dim)
        result = np.take_along_axis(x, ind, axis=dim)
        # BUGFIX: return the sorted values (`result`), not the unsorted input.
        # backward/jvp permute gradients with ind/ind_inv, which is only
        # correct when the first output is actually sorted (cf. SortGenVmap).
        return (
            torch.tensor(result, device=device),
            torch.tensor(ind, device=device),
            torch.tensor(ind_inv, device=device),
        )

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, dim = inputs
        _, ind, ind_inv = output
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim
    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        # _0/_1 are the grads for the ind/ind_inv outputs, which were marked
        # non-differentiable in setup_context; they are ignored.
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def vmap(info, in_dims, x, dim):
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 0)
        # wrap dim: the logical (unbatched) rank is x.dim() - 1 now that the
        # batch dim has been moved to the front, hence the `- 1`.
        dim = dim if dim >= 0 else dim + x.dim() - 1
        # shift dim by one to skip the batch dimension
        return NumpySort.apply(x, dim + 1), (0, 0, 0)

    @staticmethod
    def jvp(ctx, x_tangent, _):
        # Tangent of the sorted output is the input tangent permuted into
        # sorted order; note ind/ind_inv are swapped relative to backward.
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None


class SortGenVmap(torch.autograd.Function):
    """Sort via torch ops, relying on the auto-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, dim):
        device = x.device  # NOTE(review): unused; kept for parity with NumpySort.forward
        ind = torch.argsort(x, dim=dim)
        ind_inv = torch.argsort(ind, axis=dim)
        result = torch.take_along_dim(x, ind, dim=dim)
        return result, ind, ind_inv

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, dim = inputs
        _, ind, ind_inv = outputs
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None


def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
    """One 2-D sample, sorting along dim 1."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(3, 5), args=(1,))


def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
    """Builds (x, ind, ind_inv, dim) from a NumpySort of a random tensor."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    tensor = make_arg(3, 5)
    dim = 1
    _, ind, ind_inv = NumpySort.apply(tensor, 1)
    yield SampleInput(tensor, args=(ind, ind_inv, dim))


class NumpyTake(torch.autograd.Function):
    """take_along_axis via numpy; ind_inv is carried along so backward can invert."""

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        device = x.device
        x = to_numpy(x)
        ind = to_numpy(ind)
        return torch.tensor(np.take_along_axis(x, ind, dim), device=device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        # The gradient of a gather is a gather with the inverse permutation.
        ind, ind_inv = ctx.saved_tensors
        result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def vmap(info, in_dims, x, ind, ind_inv, dim):
        x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

        # wrap dim
        # NOTE(review): `x_bdim - 1` as the logical rank looks suspicious
        # (cf. NumpySort.vmap which uses x.dim() - 1) — confirm upstream intent.
        logical_dim = x.dim() if x_bdim is None else x_bdim - 1
        dim = dim if dim >= 0 else dim + logical_dim

        def expand_bdim(x, x_bdim):
            # Materialize a batch dim at the front, broadcasting if absent.
            if x_bdim is None:
                return x.expand(info.batch_size, *x.shape)
            return x.movedim(x_bdim, 0)

        x = expand_bdim(x, x_bdim)
        ind = expand_bdim(ind, ind_bdim)
        ind_inv = expand_bdim(ind_inv, ind_inv_bdim)

        return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        assert ind_tangent is None
        assert ind_inv_tangent is None
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)


class TakeGenVmap(torch.autograd.Function):
    """take_along_dim via torch ops with the auto-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        return torch.take_along_dim(x, ind, dim)

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        ind, ind_inv = ctx.saved_tensors
        result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)


# NOTE(review): the `class` statement below is split across the original
# chunk boundary; its name (`Select`) is on the next source line.
class \
Select(torch.autograd.Function):
    """Indexing (x[idx]); backward scatters the grad into a zeros tensor."""

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        # Move the batch dim to position 1 so indexing dim 0 still selects
        # the logical element; the result then has the batch dim at 0.
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 1)
        return Select.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return Select.apply(x_tangent, ctx.idx)


class SelectGenVmap(torch.autograd.Function):
    """Same as Select but with the auto-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return SelectGenVmap.apply(x_tangent, ctx.idx)


def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(3, 5), args=(2,))


class ScaleGradGenVmap(torch.autograd.Function):
    """Identity forward; scales gradients by a fixed class-level constant."""
    generate_vmap_rule = True
    scale = 3.14

    @staticmethod
    def forward(x):
        return x.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        pass

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ScaleGradGenVmap.scale

    @staticmethod
    def jvp(ctx, x_tangent):
        return x_tangent * ScaleGradGenVmap.scale


class ZeroGradientsGenVmap(torch.autograd.Function):
    """Clone-through forward with deliberately pathological zero gradients."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x.clone(), y.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        pass

    @staticmethod
    def backward(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            # Intentionally too-large gradient
            torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )

    @staticmethod
    def jvp(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )


def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(3, 5))


class ForwardHasDefaultArgs(torch.autograd.Function):
    """Like Select, but `idx` has a default — exercises default-arg handling."""

    @staticmethod
    def forward(x, idx=(2,)):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 1)
        return ForwardHasDefaultArgs.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)


# OpInfo entries consumed by OpInfo-based tests (see note at top of file).
autograd_function_db = [
    OpInfo(
        'NumpyCubeAutogradFunction',
        op=NumpyCube.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyExpMarkDirtyAutogradFunction',
        # out-of-place variant clones first because NumpyExp_ mutates its input
        op=lambda x: NumpyExp_.apply(x.clone()),
        inplace_variant=NumpyExp_.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulAutogradFunction',
        op=NumpyMul.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyCubeNotComposableAutogradFunction',
        op=lambda x: NumpyCubeNotComposable.apply(x)[0],
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortAutogradFunction',
        op=NumpySort.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'NumpyTakeAutogradFunction',
        op=NumpyTake.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SelectAutogradFunction',
        op=Select.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'CubeGenVmapAutogradFunction',
        op=CubeGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'MulGenVmapAutogradFunction',
        op=MulGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SortGenVmapAutogradFunction',
        op=SortGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'SelectGenVmapAutogradFunction',
        op=SelectGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ScaleGradGenVmapAutogradFunction',
        op=ScaleGradGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ZeroGradientsGenVmapAutogradFunction',
        op=ZeroGradientsGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ForwardHasDefaultArgsAutogradFunction',
        op=ForwardHasDefaultArgs.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_forward_default_args,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
]
diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py
new file mode 100644
index 0000000000000000000000000000000000000000..189be09d8ba96a92b90034000e79009d2b8a8b9a
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py
@@ -0,0 +1,281 @@
# mypy: ignore-errors

r"""This file is allowed to initialize CUDA context when imported."""

import functools
import torch
import torch.cuda
from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS
import inspect
import contextlib
import os


# Recorded before anything below runs, so the end-of-file assert can verify
# that merely importing this module did not initialize CUDA.
CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()


TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
CUDA_DEVICE = torch.device("cuda:0") if
TEST_CUDA else None
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
if TEST_WITH_ROCM:
    TEST_CUDNN = LazyVal(lambda: TEST_CUDA)
else:
    TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE)))

TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0)

# Compute-capability gates; all LazyVal so importing this module never touches CUDA.
SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3))
SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0))
SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0))
SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5))
SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))

# Jetson boards report capability (7, 2) (Xavier) or (8, 7) (Orin).
IS_JETSON = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() in [(7, 2), (8, 7)])

def evaluate_gfx_arch_exact(matching_arch):
    """Return True iff the ROCm gcnArchName (or its env override) equals `matching_arch`."""
    if not torch.cuda.is_available():
        return False
    gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName
    arch = os.environ.get('PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', gcn_arch_name)
    return arch == matching_arch

GFX90A_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-'))
GFX942_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-'))

def evaluate_platform_supports_flash_attention():
    """Flash attention: gfx90a/gfx942 on ROCm; SM80+ and not Windows on CUDA."""
    if TEST_WITH_ROCM:
        return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')
    if TEST_CUDA:
        return not IS_WINDOWS and SM80OrLater
    return False

def evaluate_platform_supports_efficient_attention():
    """Mem-efficient attention: gfx90a/gfx942 on ROCm; any CUDA device otherwise."""
    if TEST_WITH_ROCM:
        return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')
    if TEST_CUDA:
        return True
    return False

PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention())
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_efficient_attention())
# TODO(eqy): gate this against a cuDNN version
PLATFORM_SUPPORTS_CUDNN_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM and
                                                  torch.backends.cuda.cudnn_sdp_enabled())
# This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION (flash
# support implies mem-eff support above) but for logical clarity we keep it separate
PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION)

PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM

PLATFORM_SUPPORTS_BF16: bool = LazyVal(lambda: TEST_CUDA and SM80OrLater)

if TEST_NUMBA:
    try:
        import numba.cuda
        TEST_NUMBA_CUDA = numba.cuda.is_available()
    except Exception as e:
        # numba import/probe failed: disable numba tests entirely
        TEST_NUMBA_CUDA = False
        TEST_NUMBA = False
else:
    TEST_NUMBA_CUDA = False

# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
# RNG have been initialized.
__cuda_ctx_rng_initialized = False


# after this call, CUDA context and RNG must have been initialized on each GPU
def initialize_cuda_context_rng():
    global __cuda_ctx_rng_initialized
    assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
    if not __cuda_ctx_rng_initialized:
        # initialize cuda context and rng for memory tests
        for i in range(torch.cuda.device_count()):
            torch.randn(1, device=f"cuda:{i}")
        __cuda_ctx_rng_initialized = True


# Test whether hardware TF32 math mode enabled.
# It is enabled only on:
# - CUDA >= 11
# - arch >= Ampere
def tf32_is_not_fp32():
    """True when TF32 can actually diverge from FP32 on this system."""
    if not torch.cuda.is_available() or torch.version.cuda is None:
        return False
    if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
        return False
    if int(torch.version.cuda.split('.')[0]) < 11:
        return False
    return True


@contextlib.contextmanager
def tf32_off():
    """Context manager: disable TF32 for matmul and cudnn, restoring on exit."""
    old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
    try:
        torch.backends.cuda.matmul.allow_tf32 = False
        with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul


@contextlib.contextmanager
def tf32_on(self, tf32_precision=1e-5):
    """Context manager: enable TF32 and loosen the TestCase's `precision`.

    `self` is the running TestCase instance — its comparison tolerance is
    temporarily replaced by `tf32_precision`; both settings are restored on exit.
    """
    old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
    old_precision = self.precision
    try:
        torch.backends.cuda.matmul.allow_tf32 = True
        self.precision = tf32_precision
        with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
        self.precision = old_precision


# This is a wrapper that wraps a test to run this test twice, one with
# allow_tf32=True, another with allow_tf32=False. When running with
# allow_tf32=True, it will use reduced precision as specified by the
# argument. For example:
#   @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
#   @tf32_on_and_off(0.005)
#   def test_matmul(self, device, dtype):
#       a = ...; b = ...;
#       c = torch.matmul(a, b)
#       self.assertEqual(c, expected)
# In the above example, when testing torch.float32 and torch.complex64 on CUDA
# on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at
# TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced
# precision to check values.
#
# This decorator can be used for function with or without device/dtype, such as
# @tf32_on_and_off(0.005)
# def test_my_op(self)
# @tf32_on_and_off(0.005)
# def test_my_op(self, device)
# @tf32_on_and_off(0.005)
# def test_my_op(self, device, dtype)
# @tf32_on_and_off(0.005)
# def test_my_op(self, dtype)
# if neither device nor dtype is specified, it will check if the system has ampere device
# if device is specified, it will check if device is cuda
# if dtype is specified, it will check if dtype is float32 or complex64
# tf32 and fp32 are different only when all the three checks pass
def tf32_on_and_off(tf32_precision=1e-5):
    def with_tf32_disabled(self, function_call):
        with tf32_off():
            function_call()

    def with_tf32_enabled(self, function_call):
        with tf32_on(self, tf32_precision):
            function_call()

    def wrapper(f):
        params = inspect.signature(f).parameters
        arg_names = tuple(params.keys())

        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Rebind positional args (including `self`) to their parameter
            # names so the device/dtype checks below can look them up by key.
            for k, v in zip(arg_names, args):
                kwargs[k] = v
            cond = tf32_is_not_fp32()
            if 'device' in kwargs:
                cond = cond and (torch.device(kwargs['device']).type == 'cuda')
            if 'dtype' in kwargs:
                cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
            if cond:
                # Run the test twice: once with TF32 off, once with it on.
                with_tf32_disabled(kwargs['self'], lambda: f(**kwargs))
                with_tf32_enabled(kwargs['self'], lambda: f(**kwargs))
            else:
                f(**kwargs)

        return wrapped
    return wrapper


# This is a wrapper that wraps a test to run it with TF32 turned off.
# This wrapper is designed to be used when a test uses matmul or convolutions
# but the purpose of that test is not testing matmul or convolutions.
# Disabling TF32 will enforce torch.float tensors to be always computed
# at full precision.
def with_tf32_off(f):
    """Decorator form of tf32_off(): run the wrapped test with TF32 disabled."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        with tf32_off():
            return f(*args, **kwargs)

    return wrapped

def _get_magma_version():
    """Parse the MAGMA version out of torch.__config__.show(); (0, 0) if absent."""
    if 'Magma' not in torch.__config__.show():
        return (0, 0)
    position = torch.__config__.show().find('Magma ')
    version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0]
    return tuple(int(x) for x in version_str.split("."))

def _get_torch_cuda_version():
    """CUDA version this torch build targets, as an int tuple; (0, 0) for CPU builds."""
    if torch.version.cuda is None:
        return (0, 0)
    cuda_version = str(torch.version.cuda)
    return tuple(int(x) for x in cuda_version.split("."))

def _get_torch_rocm_version():
    """ROCm/HIP version as an int tuple; (0, 0) when not a ROCm build."""
    if not TEST_WITH_ROCM:
        return (0, 0)
    rocm_version = str(torch.version.hip)
    rocm_version = rocm_version.split("-")[0]    # ignore git sha
    return tuple(int(x) for x in rocm_version.split("."))

def _check_cusparse_generic_available():
    # cuSPARSE generic API is assumed available on all supported CUDA builds.
    return not TEST_WITH_ROCM

def _check_hipsparse_generic_available():
    """hipSPARSE generic API requires ROCm >= 5.1."""
    if not TEST_WITH_ROCM:
        return False

    rocm_version = str(torch.version.hip)
    rocm_version = rocm_version.split("-")[0]    # ignore git sha
    rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
    # NOTE(review): the `is None` arm is unreachable — the tuple above is
    # always constructed; kept for byte-compatibility.
    return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1))


TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available()
TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available()

# Shared by test_torch.py and test_multigpu.py
def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
    # Create a module+optimizer that will use scaling, and a control module+optimizer
    # that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
    mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
    mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
    with torch.no_grad():
        # Start both models from identical weights so their trajectories are comparable.
        for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
            s.copy_(c)

    kwargs = {"lr": 1.0}
    if optimizer_kwargs is not None:
        kwargs.update(optimizer_kwargs)
    opt_control = optimizer_ctor(mod_control.parameters(), **kwargs)
    opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs)

    return mod_control, mod_scaling, opt_control, opt_scaling

# Shared by test_torch.py, test_cuda.py and test_multigpu.py
def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
    """Build (models, optimizers, data, loss_fn, skip_iter) for GradScaler tests."""
    data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
            (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
            (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
            (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]

    loss_fn = torch.nn.MSELoss().to(device)

    # Iteration at which the test deliberately injects an inf/nan to exercise step skipping.
    skip_iter = 2

    return _create_scaling_models_optimizers(
        device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs,
    ) + (data, loss_fn, skip_iter)


# Importing this module should NOT eagerly initialize CUDA
if not CUDA_ALREADY_INITIALIZED_ON_IMPORT:
    assert not torch.cuda.is_initialized()
diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..07caa0ac3eee358f6bd1e66b9a33b435ddf9da6c
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py
@@ -0,0 +1,1587 @@
# mypy: ignore-errors
+ +import copy +import gc +import inspect +import runpy +import sys +import threading +from collections import namedtuple +from enum import Enum +from functools import wraps, partial +from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set +import unittest +import os +import torch +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \ + skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \ + IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, TEST_XPU, \ + _TestParametrizer, compose_parametrize_fns, dtype_name, \ + TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \ + get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE, \ + TEST_WITH_TORCHINDUCTOR +from torch.testing._internal.common_cuda import _get_torch_cuda_version, \ + TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version +from torch.testing._internal.common_dtype import get_all_dtypes + +try: + import psutil # type: ignore[import] + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + +# Note [Writing Test Templates] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# This note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# PyTorch has its own framework for instantiating test templates. 
That is, for +# taking test classes that look similar to unittest or pytest +# compatible test classes and optionally doing the following: +# +# - instantiating a version of the test class for each available device type +# (often the CPU, CUDA, and META device types) +# - further instantiating a version of each test that's always specialized +# on the test class's device type, and optionally specialized further +# on datatypes or operators +# +# This functionality is similar to pytest's parametrize functionality +# (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable +# additional logic that specializes the instantiated test classes for their +# device types (see CPUTestBase and CUDATestBase below), supports a variety +# of composable decorators that allow for test filtering and setting +# tolerances, and allows tests parametrized by operators to instantiate +# only the subset of device type x dtype that operator supports. +# +# This framework was built to make it easier to write tests that run on +# multiple device types, multiple datatypes (dtypes), and for multiple +# operators. It's also useful for controlling which tests are run. For example, +# only tests that use a CUDA device can be run on platforms with CUDA. 
+# Let's dive in with an example to get an idea for how it works:
+#
+# --------------------------------------------------------
+# A template class (looks like a regular unittest TestCase)
+# class TestClassFoo(TestCase):
+#
+#   # A template test that can be specialized with a device
+#   # NOTE: this test case is not runnable by unittest or pytest because it
+#   # accepts an extra positional argument, "device", that they do not understand
+#   def test_bar(self, device):
+#     pass
+#
+# # Function that instantiates a template class and its tests
+# instantiate_device_type_tests(TestClassFoo, globals())
+# --------------------------------------------------------
+#
+# In the above code example we see a template class and a single test template
+# that can be instantiated with a device. The function
+# instantiate_device_type_tests(), called at file scope, instantiates
+# new test classes, one per available device type, and new tests in those
+# classes from these templates. It actually does this by removing
+# the class TestClassFoo and replacing it with classes like TestClassFooCPU
+# and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
+# and CUDATestBase respectively. Additional device types, like XLA,
+# (see https://github.com/pytorch/xla) can further extend the set of
+# instantiated test classes to create classes like TestClassFooXLA.
+#
+# The test template, test_bar(), is also instantiated. In this case the template
+# is only specialized on a device, so (depending on the available device
+# types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
+# in TestClassFooCUDA.
We can think of the instantiated test classes as +# looking like this: +# +# -------------------------------------------------------- +# # An instantiated test class for the CPU device type +# class TestClassFooCPU(CPUTestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cpu(self): +# test_bar(self, 'cpu') +# +# # An instantiated test class for the CUDA device type +# class TestClassFooCUDA(CUDATestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cuda(self): +# test_bar(self, 'cuda:0') +# -------------------------------------------------------- +# +# These instantiated test classes ARE discoverable and runnable by both +# unittest and pytest. One thing that may be confusing, however, is that +# attempting to run "test_bar" will not work, despite it appearing in the +# original template code. This is because "test_bar" is no longer discoverable +# after instantiate_device_type_tests() runs, as the above snippet shows. +# Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both +# can be run with the option "-k test_bar". +# +# Removing the template class and adding the instantiated classes requires +# passing "globals()" to instantiate_device_type_tests(), because it +# edits the file's Python objects. +# +# As mentioned, tests can be additionally parametrized on dtypes or +# operators. 
Datatype parametrization uses the @dtypes decorator and +# require a test template like this: +# +# -------------------------------------------------------- +# # A template test that can be specialized with a device and a datatype (dtype) +# @dtypes(torch.float32, torch.int64) +# def test_car(self, device, dtype) +# pass +# -------------------------------------------------------- +# +# If the CPU and CUDA device types are available this test would be +# instantiated as 4 tests that cover the cross-product of the two dtypes +# and two device types: +# +# - test_car_cpu_float32 +# - test_car_cpu_int64 +# - test_car_cuda_float32 +# - test_car_cuda_int64 +# +# The dtype is passed as a torch.dtype object. +# +# Tests parametrized on operators (actually on OpInfos, more on that in a +# moment...) use the @ops decorator and require a test template like this: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and OpInfo +# @ops(op_db) +# def test_car(self, device, dtype, op) +# pass +# -------------------------------------------------------- +# +# See the documentation for the @ops decorator below for additional details +# on how to use it and see the note [OpInfos] in +# common_methods_invocations.py for more details on OpInfos. +# +# A test parametrized over the entire "op_db", which contains hundreds of +# OpInfos, will likely have hundreds or thousands of instantiations. The +# test will be instantiated on the cross-product of device types, operators, +# and the dtypes the operator supports on that device type. The instantiated +# tests will have names like: +# +# - test_car_add_cpu_float32 +# - test_car_sub_cuda_int64 +# +# The first instantiated test calls the original test_car() with the OpInfo +# for torch.add as its "op" argument, the string 'cpu' for its "device" argument, +# and the dtype torch.float32 for is "dtype" argument. 
The second instantiated +# test calls the test_car() with the OpInfo for torch.sub, a CUDA device string +# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype +# torch.int64 for its "dtype argument." +# +# In addition to parametrizing over device, dtype, and ops via OpInfos, the +# @parametrize decorator is supported for arbitrary parametrizations: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and value for x +# @parametrize("x", range(5)) +# def test_car(self, device, dtype, x) +# pass +# -------------------------------------------------------- +# +# See the documentation for @parametrize in common_utils.py for additional details +# on this. Note that the instantiate_device_type_tests() function will handle +# such parametrizations; there is no need to additionally call +# instantiate_parametrized_tests(). +# +# Clever test filtering can be very useful when working with parametrized +# tests. "-k test_car" would run every instantiated variant of the test_car() +# test template, and "-k test_car_add" runs every variant instantiated with +# torch.add. +# +# It is important to use the passed device and dtype as appropriate. Use +# helper functions like make_tensor() that require explicitly specifying +# the device and dtype so they're not forgotten. +# +# Test templates can use a variety of composable decorators to specify +# additional options and requirements, some are listed here: +# +# - @deviceCountAtLeast() +# Passes a list of strings representing all available devices of +# the test class's device type as the test template's "device" argument. +# If there are fewer devices than the value passed to the decorator +# the test is skipped. +# - @dtypes() +# In addition to accepting multiple dtypes, the @dtypes decorator +# can accept a sequence of tuple pairs of dtypes. The test template +# will be called with each tuple for its "dtype" argument. 
+# - @onlyNativeDeviceTypes +# Skips the test if the device is not a native device type (currently CPU, CUDA, Meta) +# - @onlyCPU +# Skips the test if the device is not a CPU device +# - @onlyCUDA +# Skips the test if the device is not a CUDA device +# - @onlyMPS +# Skips the test if the device is not a MPS device +# - @skipCPUIfNoLapack +# Skips the test if the device is a CPU device and LAPACK is not installed +# - @skipCPUIfNoMkl +# Skips the test if the device is a CPU device and MKL is not installed +# - @skipCUDAIfNoMagma +# Skips the test if the device is a CUDA device and MAGMA is not installed +# - @skipCUDAIfRocm +# Skips the test if the device is a CUDA device and ROCm is being used + + +# Note [Adding a Device Type] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# To add a device type: +# +# (1) Create a new "TestBase" extending DeviceTypeTestBase. +# See CPUTestBase and CUDATestBase below. +# (2) Define the "device_type" attribute of the base to be the +# appropriate string. +# (3) Add logic to this file that appends your base class to +# device_type_test_bases when your device type is available. +# (4) (Optional) Write setUpClass/tearDownClass class methods that +# instantiate dependencies (see MAGMA in CUDATestBase). +# (5) (Optional) Override the "instantiate_test" method for total +# control over how your class creates tests. +# +# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF +# they are run. This makes it useful for initializing devices and dependencies. + + +# Note [Overriding methods in generic tests] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Device generic tests look a lot like normal test classes, but they differ +# from ordinary classes in some important ways. In particular, overriding +# methods in generic tests doesn't work quite the way you expect. +# +# class TestFooDeviceType(TestCase): +# # Intention is to override +# def assertEqual(self, x, y): +# # This DOESN'T WORK! 
#       super().assertEqual(x, y)
#
# If you try to run this code, you'll get an error saying that TestFooDeviceType
# is not in scope. This is because after instantiating our classes, we delete
# it from the parent scope. Instead, you need to hardcode a direct invocation
# of the desired subclass call, e.g.,
#
# class TestFooDeviceType(TestCase):
#     # Intention is to override
#     def assertEqual(self, x, y):
#         TestCase.assertEqual(x, y)
#
# However, a less error-prone way of customizing the behavior of TestCase
# is to either (1) add your functionality to TestCase and make it toggled
# by a class attribute, or (2) create your own subclass of TestCase, and
# then inherit from it for your generic test.


def _dtype_test_suffix(dtypes):
    """ Returns the test suffix for a dtype, sequence of dtypes, or None. """
    if isinstance(dtypes, (list, tuple)):
        if len(dtypes) == 0:
            return ''
        return '_' + '_'.join(dtype_name(d) for d in dtypes)
    elif dtypes:
        return f'_{dtype_name(dtypes)}'
    else:
        return ''


def _update_param_kwargs(param_kwargs, name, value):
    """ Adds a kwarg with the specified name and value to the param_kwargs dict. """
    # Make name plural (e.g. devices / dtypes) if the value is composite.
    plural_name = f'{name}s'

    # Clear out old entries of the arg if any.
    if name in param_kwargs:
        del param_kwargs[name]
    if plural_name in param_kwargs:
        del param_kwargs[plural_name]

    if isinstance(value, (list, tuple)):
        param_kwargs[plural_name] = value
    elif value is not None:
        param_kwargs[name] = value

    # Leave param_kwargs as-is when value is None.


class DeviceTypeTestBase(TestCase):
    """Base class for device-specific test classes created by instantiate_device_type_tests()."""

    device_type: str = 'generic_device_type'

    # Flag to disable test suite early due to unrecoverable error such as CUDA error.
    _stop_test_suite = False

    # Precision is a thread-local setting since it may be overridden per test
    _tls = threading.local()
    _tls.precision = TestCase._precision
    _tls.rel_tol = TestCase._rel_tol

    @property
    def precision(self):
        # Absolute tolerance used by assertEqual; thread-local, see _tls above.
        return self._tls.precision

    @precision.setter
    def precision(self, prec):
        self._tls.precision = prec

    @property
    def rel_tol(self):
        # Relative tolerance used by assertEqual; thread-local, see _tls above.
        return self._tls.rel_tol

    @rel_tol.setter
    def rel_tol(self, prec):
        self._tls.rel_tol = prec

    # Returns a string representing the device that single device tests should use.
    # Note: single device tests use this device exclusively.
    @classmethod
    def get_primary_device(cls):
        return cls.device_type

    @classmethod
    def _init_and_get_primary_device(cls):
        # Like get_primary_device(), but tolerates subclasses whose primary
        # device is only set up lazily in setUpClass().
        try:
            return cls.get_primary_device()
        except Exception:
            # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available
            # until setUpClass() sets it. Call that manually here if needed.
            if hasattr(cls, 'setUpClass'):
                cls.setUpClass()
            return cls.get_primary_device()

    # Returns a list of strings representing all available devices of this
    # device type. The primary device must be the first string in the list
    # and the list must contain no duplicates.
    # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
    # mechanism of acquiring all available devices.
    @classmethod
    def get_all_devices(cls):
        return [cls.get_primary_device()]

    # Returns the dtypes the test has requested.
    # Prefers device-specific dtype specifications over generic ones.
    @classmethod
    def _get_dtypes(cls, test):
        if not hasattr(test, 'dtypes'):
            return None

        default_dtypes = test.dtypes.get('all')
        msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
        assert default_dtypes is not None, msg

        return test.dtypes.get(cls.device_type, default_dtypes)

    def _get_precision_override(self, test, dtype):
        # Per-dtype precision set by @precisionOverride, else the current precision.
        if not hasattr(test, 'precision_overrides'):
            return self.precision
        return test.precision_overrides.get(dtype, self.precision)

    def _get_tolerance_override(self, test, dtype):
        # Per-dtype (atol, rtol) pair set by @toleranceOverride, else current values.
        if not hasattr(test, 'tolerance_overrides'):
            return self.precision, self.rel_tol
        return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))

    def _apply_precision_override_for_test(self, test, param_kwargs):
        dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None
        dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype
        if dtype:
            self.precision = self._get_precision_override(test, dtype)
            self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)

    # Creates device-specific tests.
    @classmethod
    def instantiate_test(cls, name, test, *, generic_cls=None):

        def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []):
            # Add the device param kwarg if the test needs device or devices.
            param_kwargs = {} if param_kwargs is None else param_kwargs
            test_sig_params = inspect.signature(test).parameters
            if 'device' in test_sig_params or 'devices' in test_sig_params:
                device_arg: str = cls._init_and_get_primary_device()
                if hasattr(test, 'num_required_devices'):
                    # Multi-device tests receive the full device list instead.
                    device_arg = cls.get_all_devices()
                _update_param_kwargs(param_kwargs, 'device', device_arg)

            # Apply decorators based on param kwargs.
            for decorator in decorator_fn(param_kwargs):
                test = decorator(test)

            # Constructs the test
            @wraps(test)
            def instantiated_test(self, param_kwargs=param_kwargs):
                # Sets precision and runs test
                # Note: precision is reset after the test is run
                guard_precision = self.precision
                guard_rel_tol = self.rel_tol
                try:
                    self._apply_precision_override_for_test(test, param_kwargs)
                    result = test(self, **param_kwargs)
                except RuntimeError as rte:
                    # check if rte should stop entire test suite.
                    self._stop_test_suite = self._should_stop_test_suite()
                    # Check if test has been decorated with `@expectedFailure`
                    # Using `__unittest_expecting_failure__` attribute, see
                    # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
                    # In that case, make it fail with "unexpected success" by suppressing exception
                    if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite:
                        import sys
                        print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr)
                        return
                    # raise the runtime error as is for the test suite to record.
                    raise rte
                finally:
                    self.precision = guard_precision
                    self.rel_tol = guard_rel_tol

                return result

            assert not hasattr(cls, name), f"Redefinition of test {name}"
            setattr(cls, name, instantiated_test)

        def default_parametrize_fn(test, generic_cls, device_cls):
            # By default, no parametrization is needed.
            yield (test, '', {}, lambda _: [])

        # Parametrization decorators set the parametrize_fn attribute on the test.
        parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)

        # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
        dtypes = cls._get_dtypes(test)
        if dtypes is not None:

            def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
                for dtype in dtypes:
                    param_kwargs: Dict[str, Any] = {}
                    _update_param_kwargs(param_kwargs, "dtype", dtype)

                    # Note that an empty test suffix is set here so that the dtype can be appended
                    # later after the device.
                    yield (test, '', param_kwargs, lambda _: [])

            parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)

        # Instantiate the parametrized tests.
        for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):  # noqa: B020
            test_suffix = '' if test_suffix == '' else '_' + test_suffix
            device_suffix = '_' + cls.device_type

            # Note: device and dtype suffix placement
            # Special handling here to place dtype(s) after device according to test name convention.
            dtype_kwarg = None
            if 'dtype' in param_kwargs or 'dtypes' in param_kwargs:
                dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype']
            test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}'

            instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs,
                                    decorator_fn=decorator_fn)

    def run(self, result=None):
        super().run(result=result)
        # Early terminate test if _stop_test_suite is set.
        if self._stop_test_suite:
            result.stop()


class CPUTestBase(DeviceTypeTestBase):
    device_type = 'cpu'

    # No critical error should stop CPU test suite
    def _should_stop_test_suite(self):
        return False

class CUDATestBase(DeviceTypeTestBase):
    device_type = 'cuda'
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    primary_device: ClassVar[str]
    cudnn_version: ClassVar[Any]
    no_magma: ClassVar[bool]
    no_cudnn: ClassVar[bool]

    def has_cudnn(self):
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, then the remaining CUDA devices in index order.
        primary_device_idx = int(cls.get_primary_device().split(':')[1])
        num_devices = torch.cuda.device_count()

        prim_device = cls.get_primary_device()
        cuda_str = 'cuda:{0}'
        non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
        return [prim_device] + non_primary_devices

    @classmethod
    def setUpClass(cls):
        # has_magma shows up after cuda is initialized
        t = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma

        # Determines if cuDNN is available and its version
        cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t)
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()

        # Acquires the current device as the primary (test) device
        cls.primary_device = f'cuda:{torch.cuda.current_device()}'

# See Note [Lazy Tensor tests in device agnostic testing]
lazy_ts_backend_init = False
class LazyTestBase(DeviceTypeTestBase):
    device_type = 'lazy'

    def _should_stop_test_suite(self):
        return False

    @classmethod
    def setUpClass(cls):
        import torch._lazy
        import torch._lazy.metrics
        import torch._lazy.ts_backend
        global lazy_ts_backend_init
        if not lazy_ts_backend_init:
            # Need to connect the TS backend to lazy key before running tests
            torch._lazy.ts_backend.init()
            lazy_ts_backend_init = True

class MPSTestBase(DeviceTypeTestBase):
    device_type = 'mps'
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # currently only one device is supported on MPS backend
        prim_device = cls.get_primary_device()
        return [prim_device]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = 'mps:0'

    def _should_stop_test_suite(self):
        return False

class XPUTestBase(DeviceTypeTestBase):
    device_type = 'xpu'
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # currently only one device is supported on XPU backend
        prim_device = cls.get_primary_device()
        return [prim_device]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = 'xpu:0'

    def _should_stop_test_suite(self):
        return False

class PrivateUse1TestBase(DeviceTypeTestBase):
    primary_device: ClassVar[str]
    device_mod = None
    device_type = 'privateuse1'

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, then the remaining devices in index order.
        primary_device_idx = int(cls.get_primary_device().split(':')[1])
        num_devices = cls.device_mod.device_count()
        prim_device = cls.get_primary_device()
        device_str = f'{cls.device_type}:{{0}}'
        non_primary_devices = [device_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
        return [prim_device] + non_primary_devices

    @classmethod
    def setUpClass(cls):
        cls.device_type = torch._C._get_privateuse1_backend_name()
        cls.device_mod = getattr(torch, cls.device_type, None)
        assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register
                                            a module by `torch._register_device_module`.'''
        cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}'

# Adds available device-type-specific test base classes
def get_device_type_test_bases():
    # set type to List[Any] due to mypy list-of-union issue:
    # https://github.com/python/mypy/issues/3351
    test_bases: List[Any] = list()

    if IS_SANDCASTLE or IS_FBCODE:
        if IS_REMOTE_GPU:
            # Skip if sanitizer is enabled
            if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN:
                test_bases.append(CUDATestBase)
        else:
            test_bases.append(CPUTestBase)
    else:
        test_bases.append(CPUTestBase)
        if torch.cuda.is_available():
            test_bases.append(CUDATestBase)

        device_type = torch._C._get_privateuse1_backend_name()
        device_mod = getattr(torch, device_type, None)
        if hasattr(device_mod, "is_available") and device_mod.is_available():
            test_bases.append(PrivateUse1TestBase)
        # Disable MPS testing in generic device testing temporarily while we're
        # ramping up support.
        # elif torch.backends.mps.is_available():
        #   test_bases.append(MPSTestBase)

    return test_bases

device_type_test_bases = get_device_type_test_bases()


def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
    """Keeps only the test bases whose device_type passes the except_for/only_for filters."""
    # device type cannot appear in both except_for and only_for
    intersect = set(except_for if except_for else []) & set(only_for if only_for else [])
    assert not intersect, f"device ({intersect}) appeared in both except_for and only_for"

    if except_for:
        device_type_test_bases = filter(
            lambda x: x.device_type not in except_for, device_type_test_bases)
    if only_for:
        device_type_test_bases = filter(
            lambda x: x.device_type in only_for, device_type_test_bases)

    return list(device_type_test_bases)


# Note [How to extend DeviceTypeTestBase to add new test device]
# The following logic optionally allows downstream projects like pytorch/xla to
# add more test devices.
# Instructions:
#  - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
#  - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
#    a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
#    `instantiate_test` method.
#  - DO NOT import common_device_type inside the file.
#    `runpy.run_path` with `globals()` already properly setup the context so that
#    `DeviceTypeTestBase` is already available.
#  - Set a top-level variable `TEST_CLASS` equal to your new class.
#    E.g. TEST_CLASS = XLATensorBase
#  - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path
#    to this file. Multiple paths can be separated by `:`.
# See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
if _TORCH_TEST_DEVICES:
    for path in _TORCH_TEST_DEVICES.split(':'):
        # runpy (a stdlib module) lacks annotations
        mod = runpy.run_path(path, init_globals=globals())  # type: ignore[func-returns-value]
        device_type_test_bases.append(mod['TEST_CLASS'])


PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'

PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR'
PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR'
PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY = 'PYTORCH_TESTING_DEVICE_FOR_CUSTOM'


def get_desired_device_type_test_bases(except_for=None, only_for=None, include_lazy=False, allow_mps=False):
    """Returns the device-type test base classes to instantiate, after applying
    caller filters (except_for/only_for), opt-ins (include_lazy/allow_mps), and
    the PYTORCH_TESTING_DEVICE_* environment variable filters."""
    # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy`
    test_bases = device_type_test_bases.copy()
    if allow_mps and TEST_MPS and MPSTestBase not in test_bases:
        test_bases.append(MPSTestBase)
    if only_for == 'xpu' and TEST_XPU and XPUTestBase not in test_bases:
        test_bases.append(XPUTestBase)
    # Filter out the device types based on user inputs
    desired_device_type_test_bases = filter_desired_device_types(test_bases, except_for, only_for)
    if include_lazy:
        # Note [Lazy Tensor tests in device agnostic testing]
        # Right now, test_view_ops.py runs with LazyTensor.
        # We don't want to opt every device-agnostic test into using the lazy device,
        # because many of them will fail.
        # So instead, the only way to opt a specific device-agnostic test file into
        # lazy tensor testing is with include_lazy=True
        if IS_FBCODE:
            print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr)
        else:
            desired_device_type_test_bases.append(LazyTestBase)

    def split_if_not_empty(x: str):
        return x.split(",") if x else []

    # run some cuda testcases on other devices if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_FOR_CUSTOM=privateuse1
    env_custom_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY, ''))
    if env_custom_only_for:
        desired_device_type_test_bases += filter(lambda x: x.device_type in env_custom_only_for, test_bases)
        # De-duplicate after appending the custom bases.
        desired_device_type_test_bases = list(set(desired_device_type_test_bases))

    # Filter out the device types based on environment variables if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
    # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
    env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, ''))
    env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, ''))

    return filter_desired_device_types(desired_device_type_test_bases, env_except_for, env_only_for)



# Adds 'instantiated' device-specific test cases to the given scope.
# The tests in these test cases are derived from the generic tests in
# generic_test_class. This function should be used instead of
# instantiate_parametrized_tests() if the test class contains
# device-specific tests (NB: this supports additional @parametrize usage).
#
# See note "Writing Test Templates"
def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False):
    """Creates one test class per available device type from the template
    generic_test_class, adds those classes to scope, and removes the template
    so only the instantiated (runnable) tests are discoverable."""
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]

    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    # that would add its tests to our test classes and they would be
    # discovered (despite not being runnable). Inherited methods also
    # can't be removed later, and we can't rely on load_tests because
    # pytest doesn't support it (as of this writing).
    empty_name = generic_test_class.__name__ + "_base"
    empty_class = type(empty_name, generic_test_class.__bases__, {})

    # Acquires members names
    # See Note [Overriding methods in generic tests]
    generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
    generic_tests = [x for x in generic_members if x.startswith('test')]

    # Creates device-specific test cases
    for base in get_desired_device_type_test_bases(except_for, only_for, include_lazy, allow_mps):
        class_name = generic_test_class.__name__ + base.device_type.upper()

        # type set to Any and suppressed due to unsupport runtime class:
        # https://github.com/python/mypy/wiki/Unsupported-Python-Features
        device_type_test_class: Any = type(class_name, (base, empty_class), {})

        for name in generic_members:
            if name in generic_tests:  # Instantiates test member
                test = getattr(generic_test_class, name)
                # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
                sig = inspect.signature(device_type_test_class.instantiate_test)
                if len(sig.parameters) == 3:
                    # Instantiates the device-specific tests
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
                else:
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test))
            else:  # Ports non-test member
                assert name not in device_type_test_class.__dict__, f"Redefinition of directly defined member {name}"
                nontest = getattr(generic_test_class, name)
                setattr(device_type_test_class, name, nontest)

        # The dynamically-created test class derives from the test template class
        # and the empty class. Arrange for both setUpClass and tearDownClass methods
        # to be called. This allows the parameterized test classes to support setup
        # and teardown.
        @classmethod
        def _setUpClass(cls):
            base.setUpClass()
            empty_class.setUpClass()

        @classmethod
        def _tearDownClass(cls):
            empty_class.tearDownClass()
            base.tearDownClass()

        device_type_test_class.setUpClass = _setUpClass
        device_type_test_class.tearDownClass = _tearDownClass

        # Mimics defining the instantiated class in the caller's file
        # by setting its module to the given class's and adding
        # the module to the given scope.
        # This lets the instantiated class be discovered by unittest.
        device_type_test_class.__module__ = generic_test_class.__module__
        scope[class_name] = device_type_test_class


# Category of dtypes to run an OpInfo-based test for
# Example use: @ops(dtype=OpDTypes.supported)
#
# There are several categories:
# - supported: Every dtype supported by the operator. Use for exhaustive
#   testing of all dtypes.
# - unsupported: Run tests on dtypes not supported by the operator. e.g. for
#   testing the operator raises an error and doesn't crash.
# - supported_backward: Every dtype supported by the operator's backward pass.
# - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
# - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
#   operator supports in both forward and backward.
# - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
#   when this is selected.
+class OpDTypes(Enum): + supported = 0 # Test all supported dtypes (default) + unsupported = 1 # Test only unsupported dtypes + supported_backward = 2 # Test all supported backward dtypes + unsupported_backward = 3 # Test only unsupported backward dtypes + any_one = 4 # Test precisely one supported dtype + none = 5 # Instantiate no dtype variants (no dtype kwarg needed) + any_common_cpu_cuda_one = 6 # Test precisely one supported dtype that is common to both cuda and cpu + + +# Arbitrary order +ANY_DTYPE_ORDER = ( + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, + torch.float16, + torch.bfloat16, + torch.long, + torch.int32, + torch.int16, + torch.int8, + torch.uint8, + torch.bool +) + +def _serialize_sample(sample_input): + # NB: For OpInfos, SampleInput.summary() prints in a cleaner way. + if getattr(sample_input, "summary", None) is not None: + return sample_input.summary() + return str(sample_input) + +# Decorator that defines the OpInfos a test template should be instantiated for. +# +# Example usage: +# +# @ops(unary_ufuncs) +# def test_numerics(self, device, dtype, op): +# +# +# This will instantiate variants of test_numerics for each given OpInfo, +# on each device the OpInfo's operator supports, and for every dtype supported by +# that operator. There are a few caveats to the dtype rule, explained below. +# +# The @ops decorator can accept two +# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified +# then the test variants are instantiated for those dtypes, regardless of +# what the operator supports. If given "allowed_dtypes" then test variants +# are instantiated only for the intersection of allowed_dtypes and the dtypes +# they would otherwise be instantiated with. That is, allowed_dtypes composes +# with the options listed above and below. 
+# +# The "dtypes" argument can also accept additional values (see OpDTypes above): +# OpDTypes.supported - the test is instantiated for all dtypes the operator +# supports +# OpDTypes.unsupported - the test is instantiated for all dtypes the operator +# doesn't support +# OpDTypes.supported_backward - the test is instantiated for all dtypes the +# operator's gradient formula supports +# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the +# operator's gradient formula doesn't support +# OpDTypes.any_one - the test is instantiated for one dtype the +# operator supports. The dtype supports forward and backward if possible. +# OpDTypes.none - the test is instantiated without any dtype. The test signature +# should not include a dtype kwarg in this case. +# +# These options allow tests to have considerable control over the dtypes +# they're instantiated for. + +class ops(_TestParametrizer): + def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported, + allowed_dtypes: Optional[Sequence[torch.dtype]] = None, skip_if_dynamo=True): + self.op_list = list(op_list) + self.opinfo_dtypes = dtypes + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + self.skip_if_dynamo = skip_if_dynamo + + def _parametrize_test(self, test, generic_cls, device_cls): + """ Parameterizes the given test function across each op and its associated dtypes. """ + if device_cls is None: + raise RuntimeError('The @ops decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + op = check_exhausted_iterator = object() + for op in self.op_list: + # Determine the set of dtypes to use. 
+ dtypes: Union[Set[torch.dtype], Set[None]] + if isinstance(self.opinfo_dtypes, Sequence): + dtypes = set(self.opinfo_dtypes) + elif self.opinfo_dtypes == OpDTypes.unsupported_backward: + dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported_backward: + dtypes = op.supported_backward_dtypes(device_cls.device_type) + elif self.opinfo_dtypes == OpDTypes.unsupported: + dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported: + dtypes = set(op.supported_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.any_one: + # Tries to pick a dtype that supports both forward or backward + supported = op.supported_dtypes(device_cls.device_type) + supported_backward = op.supported_backward_dtypes(device_cls.device_type) + supported_both = supported.intersection(supported_backward) + dtype_set = supported_both if len(supported_both) > 0 else supported + for dtype in ANY_DTYPE_ORDER: + if dtype in dtype_set: + dtypes = {dtype} + break + else: + dtypes = {} + elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one: + # Tries to pick a dtype that supports both CPU and CUDA + supported = set(op.dtypes).intersection(op.dtypesIfCUDA) + if supported: + dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)} + else: + dtypes = {} + + elif self.opinfo_dtypes == OpDTypes.none: + dtypes = {None} + else: + raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}") + + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = op.formatted_name + + for dtype in dtypes: + # Construct parameter kwargs to pass to the test. 
+ param_kwargs = {'op': op} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + + # NOTE: test_wrapper exists because we don't want to apply + # op-specific decorators to the original test. + # Test-specific decorators are applied to the original test, + # however. + try: + @wraps(test) + def test_wrapper(*args, **kwargs): + try: + return test(*args, **kwargs) + except unittest.SkipTest as e: + raise e + except Exception as e: + tracked_input = get_tracked_input() + if PRINT_REPRO_ON_FAILURE and tracked_input is not None: + raise Exception( # noqa: TRY002 + f"Caused by {tracked_input.type_desc} " + f"at index {tracked_input.index}: " + f"{_serialize_sample(tracked_input.val)}") from e + raise e + finally: + clear_tracked_input() + + if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR: + test_wrapper = skipIfTorchDynamo("Policy: we don't run OpInfo tests w/ Dynamo")(test_wrapper) + + # Initialize info for the last input seen. This is useful for tracking + # down which inputs caused a test failure. Note that TrackedInputIter is + # responsible for managing this. + test.tracked_input = None + + decorator_fn = partial(op.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for op {op.name}!") + raise ex + if op is check_exhausted_iterator: + raise ValueError('An empty op_list was passed to @ops. ' + 'Note that this may result from reuse of a generator.') + +# Decorator that skips a test if the given condition is true. +# Notes: +# (1) Skip conditions stack. +# (2) Skip conditions can be bools or strings. If a string the +# test base must have defined the corresponding attribute to be False +# for the test to run. If you want to use a string argument you should +# probably define a new decorator instead (see below). 
#   (3) Prefer the existing decorators to defining the 'device_type' kwarg.
class skipIf:
    """Skips a test when a dependency condition holds.

    `dep` is either a bool (skip when True) or a string naming an attribute
    on the test instance (skip unless that attribute is set to False).
    If `device_type` is given, the condition only applies on that device type.
    """

    def __init__(self, dep, reason, device_type=None):
        self.dep = dep
        self.reason = reason
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def dep_fn(slf, *args, **kwargs):
            # The skip only applies when no device type was requested, or the
            # running test's device type matches it.
            applies = self.device_type is None or self.device_type == slf.device_type
            if applies:
                dep = self.dep
                # String dep: look it up on the test instance (missing attrs
                # default to True, i.e. "skip"). Bool dep: skip when truthy.
                should_skip = (
                    (isinstance(dep, str) and getattr(slf, dep, True))
                    or (isinstance(dep, bool) and dep)
                )
                if should_skip:
                    raise unittest.SkipTest(self.reason)
            return fn(slf, *args, **kwargs)

        return dep_fn


# Skips a test on CPU if the condition is true.
class skipCPUIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='cpu')


# Skips a test on CUDA if the condition is true.
class skipCUDAIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='cuda')

# Skips a test on Lazy if the condition is true.
class skipLazyIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='lazy')

# Skips a test on Meta if the condition is true.
class skipMetaIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='meta')

# Skips a test on MPS if the condition is true.
class skipMPSIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='mps')

# Skips a test on XLA if the condition is true.
class skipXLAIf(skipIf):

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='xla')

# Skips a test on the privateuse1 backend if the condition is true.
class skipPRIVATEUSE1If(skipIf):

    def __init__(self, dep, reason):
        # The privateuse1 device-type name is registered at runtime.
        device_type = torch._C._get_privateuse1_backend_name()
        super().__init__(dep, reason, device_type=device_type)

def _has_sufficient_memory(device, size):
    # Returns True if `device` has at least `size` bytes of memory free.
    # May raise unittest.SkipTest for device types it cannot check.
    if torch.device(device).type == 'cuda':
        if not torch.cuda.is_available():
            return False
        gc.collect()
        torch.cuda.empty_cache()
        # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
        if device == 'cuda':
            device = 'cuda:0'
        return torch.cuda.memory.mem_get_info(device)[0] >= size

    if device == 'xla':
        raise unittest.SkipTest('TODO: Memory availability checks for XLA?')

    if device != 'cpu':
        raise unittest.SkipTest('Unknown device type')

    # CPU
    if not HAS_PSUTIL:
        raise unittest.SkipTest('Need psutil to determine if memory is sufficient')

    # The sanitizers have significant memory overheads
    if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
        effective_size = size * 10
    else:
        effective_size = size

    # If memory looks insufficient, collect garbage once and re-check;
    # the final answer is based on the post-collection reading.
    if psutil.virtual_memory().available < effective_size:
        gc.collect()
    return psutil.virtual_memory().available >= effective_size


def largeTensorTest(size, device=None):
    """Skip test if the device has insufficient memory to run the test

    size may be a number of bytes, a string of the form "N GB", or a callable

    If the test is a device generic test, available memory on the primary device will be checked.
    It can also be overriden by the optional `device=` argument.
    In other tests, the `device=` argument needs to be specified.
    """
    # "N GB" strings are converted to bytes eagerly; callables are deferred
    # until the test runs so they can see the test's args.
    if isinstance(size, str):
        assert size.endswith(('GB', 'gb')), "only bytes or GB supported"
        size = 1024 ** 3 * int(size[:-2])

    def inner(fn):
        @wraps(fn)
        def dep_fn(self, *args, **kwargs):
            size_bytes = size(self, *args, **kwargs) if callable(size) else size
            _device = device if device is not None else self.get_primary_device()
            if not _has_sufficient_memory(_device, size_bytes):
                raise unittest.SkipTest(f'Insufficient {_device} memory')

            return fn(self, *args, **kwargs)
        return dep_fn
    return inner


# Marks a test as expected to fail on the given device type: the test passes
# if the wrapped function raises, and fails (via slf.fail) if it succeeds.
class expectedFailure:

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def efail_fn(slf, *args, **kwargs):
            # Some test instances expose only `device` (a string) rather than
            # `device_type`; fall back to it in that case.
            if not hasattr(slf, "device_type") and hasattr(slf, "device") and isinstance(slf.device, str):
                target_device_type = slf.device
            else:
                target_device_type = slf.device_type

            if self.device_type is None or self.device_type == target_device_type:
                try:
                    fn(slf, *args, **kwargs)
                except Exception:
                    return
                else:
                    slf.fail('expected test to fail, but it passed')

            # Non-matching device types run the test normally.
            return fn(slf, *args, **kwargs)
        return efail_fn


# Restricts a test to a single device type; other device types skip.
class onlyOn:

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def only_fn(slf, *args, **kwargs):
            if self.device_type != slf.device_type:
                reason = f"Only runs on {self.device_type}"
                raise unittest.SkipTest(reason)

            return fn(slf, *args, **kwargs)

        return only_fn


# Decorator that provides all available devices of the device type to the test
# as a list of strings instead of providing a single device string.
# Skips the test if the number of available devices of the variant's device
# type is less than the 'num_required_devices' arg.
class deviceCountAtLeast:

    def __init__(self, num_required_devices):
        self.num_required_devices = num_required_devices

    def __call__(self, fn):
        # The attribute doubles as a marker consumed by the instantiation
        # machinery; redefinition would silently change the requirement.
        assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}"
        fn.num_required_devices = self.num_required_devices

        @wraps(fn)
        def multi_fn(slf, devices, *args, **kwargs):
            if len(devices) < self.num_required_devices:
                reason = f"fewer than {self.num_required_devices} devices detected"
                raise unittest.SkipTest(reason)

            return fn(slf, devices, *args, **kwargs)

        return multi_fn

# Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
def onlyNativeDeviceTypes(fn):
    @wraps(fn)
    def only_fn(self, *args, **kwargs):
        if self.device_type not in NATIVE_DEVICES:
            reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}"
            raise unittest.SkipTest(reason)

        return fn(self, *args, **kwargs)

    return only_fn

# Specifies per-dtype precision overrides.
# Ex.
#
# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
#   ...
#
# When the test is instantiated its class's precision will be set to the
# corresponding override, if it exists.
# self.precision can be accessed directly, and it also controls the behavior of
# functions like self.assertEqual().
#
# Note that self.precision is a scalar value, so if you require multiple
# precisions (or are working with multiple dtypes) they should be specified
# explicitly and computed using self.precision (e.g.
# self.precision *2, max(1, self.precision)).
class precisionOverride:

    def __init__(self, d):
        # Validate eagerly so a bad mapping fails at decoration time,
        # not when the test is instantiated.
        assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
        for dtype in d.keys():
            assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}"

        self.d = d

    def __call__(self, fn):
        # Stashed on the test function; applied by the instantiation machinery.
        fn.precision_overrides = self.d
        return fn

# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
# precisionOverride.
# Ex.
#
# @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3},
#                     torch.double : tol{atol=1e-4, rtol = 0})
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
#   ...
#
# When the test is instantiated its class's tolerance will be set to the
# corresponding override, if it exists.
# self.rtol and self.precision can be accessed directly, and they also control
# the behavior of functions like self.assertEqual().
#
# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
# atol = 1e-4 and rtol = 0 for torch.double.
tol = namedtuple('tol', ['atol', 'rtol'])

class toleranceOverride:
    def __init__(self, d):
        assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
        for dtype, prec in d.items():
            assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}"
            assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"

        self.d = d

    def __call__(self, fn):
        # Stashed on the test function; applied by the instantiation machinery.
        fn.tolerance_overrides = self.d
        return fn

# Decorator that instantiates a variant of the test for each given dtype.
# Notes:
#   (1) Tests that accept the dtype argument MUST use this decorator.
#   (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
#       or dtypesIfCUDA.
#   (3) Can accept an iterable of dtypes or an iterable of tuples
#       of dtypes.
# Examples:
# @dtypes(torch.float32, torch.float64)
# @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
class dtypes:
    """Registers dtype variants for a test template.

    Accepts either individual torch.dtype arguments or tuples/lists of dtypes
    (one variant per tuple). The variants are recorded on the function's
    `dtypes` attribute keyed by device type ("all" by default).
    """

    def __init__(self, *args, device_type="all"):
        tuple_form = bool(args) and isinstance(args[0], (list, tuple))
        if tuple_form:
            # Every variant must then be a list/tuple of dtypes.
            for arg in args:
                assert isinstance(arg, (list, tuple)), \
                    "When one dtype variant is a tuple or list, " \
                    "all dtype variants must be. " \
                    f"Received non-list non-tuple dtype {str(arg)}"
                assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}"
        else:
            assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}"

        self.args = args
        self.device_type = device_type

    def __call__(self, fn):
        registry = getattr(fn, 'dtypes', {})
        # Each device type may only be configured once per test.
        assert self.device_type not in registry, f"dtypes redefinition for {self.device_type}"
        registry[self.device_type] = self.args
        fn.dtypes = registry
        return fn


# Overrides specified dtypes on the CPU.
class dtypesIfCPU(dtypes):

    def __init__(self, *args):
        super().__init__(*args, device_type='cpu')


# Overrides specified dtypes on CUDA.
class dtypesIfCUDA(dtypes):

    def __init__(self, *args):
        super().__init__(*args, device_type='cuda')

# Overrides specified dtypes on MPS.
class dtypesIfMPS(dtypes):

    def __init__(self, *args):
        super().__init__(*args, device_type='mps')

# Overrides specified dtypes on the privateuse1 backend (name is runtime-registered).
class dtypesIfPRIVATEUSE1(dtypes):

    def __init__(self, *args):
        super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name())

def onlyCPU(fn):
    return onlyOn('cpu')(fn)


def onlyCUDA(fn):
    return onlyOn('cuda')(fn)


def onlyMPS(fn):
    return onlyOn('mps')(fn)


def onlyXPU(fn):
    return onlyOn('xpu')(fn)

def onlyPRIVATEUSE1(fn):
    # If torch has no module registered under the backend's name, the test is
    # skipped unconditionally rather than restricted to that device type.
    device_type = torch._C._get_privateuse1_backend_name()
    device_mod = getattr(torch, device_type, None)
    if device_mod is None:
        reason = f"Skip as torch has no module of {device_type}"
        return unittest.skip(reason)(fn)
    return onlyOn(device_type)(fn)

def onlyCUDAAndPRIVATEUSE1(fn):
    @wraps(fn)
    def only_fn(self, *args, **kwargs):
        if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()):
            reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}"
            raise unittest.SkipTest(reason)

        return fn(self, *args, **kwargs)

    return only_fn

# Runs the test with cuDNN disabled (CUDA only); a no-op wrapper elsewhere.
def disablecuDNN(fn):

    @wraps(fn)
    def disable_cudnn(self, *args, **kwargs):
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                return fn(self, *args, **kwargs)
        return fn(self, *args, **kwargs)

    return disable_cudnn

# Runs the test with mkldnn disabled when it is available; a no-op wrapper otherwise.
def disableMkldnn(fn):

    @wraps(fn)
    def disable_mkldnn(self, *args, **kwargs):
        if torch.backends.mkldnn.is_available():
            with torch.backends.mkldnn.flags(enabled=False):
                return fn(self, *args, **kwargs)
        return fn(self, *args, **kwargs)

    return disable_mkldnn


def expectedFailureCPU(fn):
    return expectedFailure('cpu')(fn)


def expectedFailureCUDA(fn):
    return expectedFailure('cuda')(fn)

def expectedFailureXPU(fn):
    return expectedFailure('xpu')(fn)

def expectedFailureMeta(fn):
    # Also skipped under Dynamo, where the meta expected-failure would not hold.
    return skipIfTorchDynamo()(expectedFailure('meta')(fn))

def expectedFailureXLA(fn):
    return expectedFailure('xla')(fn)

# Skips a test on CPU if LAPACK is not available.
def skipCPUIfNoLapack(fn):
    return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)


# Skips a test on CPU if FFT is not available.
def skipCPUIfNoFFT(fn):
    return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn)


# Skips a test on CPU if MKL is not available.
def skipCPUIfNoMkl(fn):
    return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)


# Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows).
def skipCPUIfNoMklSparse(fn):
    return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn)


# Skips a test on CPU if mkldnn is not available.
def skipCPUIfNoMkldnn(fn):
    return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn)


# Skips a test on CUDA if MAGMA is not available.
def skipCUDAIfNoMagma(fn):
    # 'no_magma' is a string dep resolved against the test instance at runtime;
    # also forces the non-default-stream skip wrapper.
    return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn))

def has_cusolver():
    # cuSOLVER ships with non-ROCm (i.e. CUDA) builds.
    return not TEST_WITH_ROCM

def has_hipsolver():
    rocm_version = _get_torch_rocm_version()
    # hipSOLVER is disabled on ROCM < 5.3
    # NOTE(review): assumes _get_torch_rocm_version() returns a comparable
    # tuple on non-ROCm builds as well — confirm against its definition.
    return rocm_version >= (5, 3)

# Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available
def skipCUDAIfNoCusolver(fn):
    return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn)


# Skips a test if both cuSOLVER and MAGMA are not available
def skipCUDAIfNoMagmaAndNoCusolver(fn):
    if has_cusolver():
        return fn
    else:
        # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
        return skipCUDAIfNoMagma(fn)

# Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available
def skipCUDAIfNoMagmaAndNoLinalgsolver(fn):
    if has_cusolver() or has_hipsolver():
        return fn
    else:
        # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
        return skipCUDAIfNoMagma(fn)

# Skips a test on CUDA when using ROCm.
# Usable both bare (@skipCUDAIfRocm) and with a message
# (@skipCUDAIfRocm(msg=...)).
def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"):
    def dec_fn(fn):
        reason = f"skipCUDAIfRocm: {msg}"
        return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn)
    if func:
        return dec_fn(func)
    return dec_fn

# Skips a test on CUDA when not using ROCm.
def skipCUDAIfNotRocm(fn):
    return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn)

# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipCUDAIfRocmVersionLessThan(version=None):
    """Skips a CUDA test unless ROCm is in use and at least `version` (a [major, minor] pair)."""

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if self.device_type == 'cuda':
                if not TEST_WITH_ROCM:
                    reason = "ROCm not available"
                    raise unittest.SkipTest(reason)
                rocm_version_tuple = _get_torch_rocm_version()
                # A None version requirement is treated as unsatisfiable.
                if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
                    reason = f"ROCm {rocm_version_tuple} is available but {version} required"
                    raise unittest.SkipTest(reason)

            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn

# Skips a test on CUDA when using ROCm.
def skipCUDAIfNotMiopenSuggestNHWC(fn):
    return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn)

# Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s.
def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            version = _get_torch_cuda_version()
            if version == (0, 0):  # cpu or rocm
                return fn(self, *args, **kwargs)
            if version in (versions or []):
                reason = f"test skipped for CUDA version {version}"
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn

# Skips a test for CUDA versions less than specified, given in the form of [major, minor].
def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            version = _get_torch_cuda_version()
            if version == (0, 0):  # cpu or rocm
                return fn(self, *args, **kwargs)
            if version < versions:
                # BUGFIX: the reason previously interpolated `version` (the
                # *detected* CUDA version), producing messages like
                # "skipped for CUDA versions < 11.8" on an 11.8 install;
                # the required version `versions` is what belongs here.
                reason = f"test skipped for CUDA versions < {versions}"
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn

# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
def skipCUDAIfCudnnVersionLessThan(version=0):
    # Skips a CUDA test when cuDNN is absent or older than `version`;
    # non-CUDA device types run the test unchanged.

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if self.device_type == 'cuda':
                if self.no_cudnn:
                    reason = "cuDNN not available"
                    raise unittest.SkipTest(reason)
                if self.cudnn_version is None or self.cudnn_version < version:
                    reason = f"cuDNN version {self.cudnn_version} is available but {version} required"
                    raise unittest.SkipTest(reason)

            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn

# Skips a test on CUDA if cuSparse generic API is not available
def skipCUDAIfNoCusparseGeneric(fn):
    return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn)

def skipCUDAIfNoHipsparseGeneric(fn):
    return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn)

def skipCUDAIfNoSparseGeneric(fn):
    return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn)

def skipCUDAIfNoCudnn(fn):
    # Version 0 means "any cuDNN at all".
    return skipCUDAIfCudnnVersionLessThan(0)(fn)

def skipCUDAIfMiopen(fn):
    # A non-None torch.version.hip indicates a ROCm/MIOpen build.
    return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)

def skipCUDAIfNoMiopen(fn):
    return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn))

def skipLazy(fn):
    return skipLazyIf(True, "test doesn't work with lazy tensors")(fn)

def skipMeta(fn):
    return skipMetaIf(True, "test doesn't work with meta tensors")(fn)

def skipXLA(fn):
    return skipXLAIf(True, "Marked as skipped for XLA")(fn)

def skipMPS(fn):
    return skipMPSIf(True, "test doesn't work on MPS backend")(fn)

def skipPRIVATEUSE1(fn):
    return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn)

# TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now.
# This should probably enumerate all available device type test base classes.
def get_all_device_types() -> List[str]:
    # NOTE: only enumerates cpu/cuda; see the TODO above about other backends.
    return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']

# ===== new file: torch/testing/_internal/common_dist_composable.py =====
# Small nn.Module hierarchies used as fixtures by composable-distributed tests.

# mypy: ignore-errors

# Owner(s): ["oncall: distributed"]

from typing import Tuple

import torch
import torch.nn as nn


class UnitModule(nn.Module):
    # Linear -> (ReLU, Linear, ReLU) -> Linear, all 100x100.
    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        return self.l2(self.seq(self.l1(x)))


class CompositeModel(nn.Module):
    # Linear -> UnitModule -> UnitModule -> Linear.
    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        return self.l2(self.u2(self.u1(self.l1(x))))


class UnitParamModule(nn.Module):
    # Like UnitModule, but the final projection is a raw nn.Parameter
    # consumed via torch.mm instead of a Linear layer.
    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.p = nn.Parameter(torch.randn((100, 100), device=device))

    def forward(self, x):
        return torch.mm(self.seq(self.l(x)), self.p)


class CompositeParamModel(nn.Module):
    # Composite model with both a direct nn.Parameter and a persistent buffer.
    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.p = nn.Parameter(torch.randn((100, 100), device=device))
        self.register_buffer(
            "buffer", torch.randn((100, 100), device=device), persistent=True
        )

    def forward(self, x):
        a = self.u2(self.u1(self.l(x)))
        b = self.p
        return torch.mm(a, b)


class FakeSequential(nn.Module):
    # Define this class to achieve a desired nested wrapping using the module
    # wrap policy with `nn.Sequential`
    def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
        super().__init__()
        # NOTE(review): a plain list intentionally hides the submodules from
        # module registration (unlike nn.ModuleList) — presumably the point of
        # the "Fake" in the name; confirm against the wrap-policy tests.
        self._module_sequence = list(modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for module in self._module_sequence:
            x = module(x)
        return x


class NestedSequentialModel(nn.Module):
    def __init__(self, device: torch.device) -> None:
        super().__init__()
        # This nested structure exercises traversal order to catch differences
        # between valid traversals (e.g. BFS and DFS variations).
        self.seq1 = nn.Sequential(
            nn.Linear(1, 1, device=device),
            FakeSequential(
                nn.Linear(1, 1, device=device),
                nn.ReLU(),
                FakeSequential(
                    nn.Linear(1, 1, device=device),
                ),
                nn.ReLU(),
            ),
            nn.Linear(1, 2, device=device),
        )
        self.lin = nn.Linear(2, 2, device=device)
        self.seq2 = nn.Sequential(
            nn.ReLU(),
            nn.Linear(2, 3, device=device),
            FakeSequential(
                nn.Linear(3, 2, bias=False, device=device),
                nn.Linear(2, 4, bias=False, device=device),
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.seq2(self.lin(self.seq1(x)))

# ===== new file: torch/testing/_internal/common_distributed.py =====
# Shared helpers and skip/require decorators for distributed (c10d) tests.

# mypy: ignore-errors

import abc
import faulthandler
import itertools
import logging
import multiprocessing
import os
import queue
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import types
import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from functools import partial, reduce, wraps
from io import StringIO
from typing import Dict, NamedTuple, Optional, Union, List, Any, Callable, Tuple
from unittest.mock import patch

import torch
import torch._dynamo.test_case
import torch.cuda.nccl
import torch.distributed as c10d
import torch.nn as nn
from torch.testing._internal.common_utils import (
    FILE_SCHEMA,
    find_free_port,
    IS_SANDCASTLE,
    retry_on_connect_failures,
    skip_but_pass_in_sandcastle,
    skip_but_pass_in_sandcastle_if,
    TEST_WITH_ROCM,
    TEST_WITH_TSAN,
    TestCase,
    run_tests,
)
from torch.testing._internal.distributed.multi_threaded_pg import (
    _install_threaded_pg,
    _uninstall_threaded_pg,
    ProcessLocalGroup,
)
import operator

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


# A distinct subprocess exit code paired with a human-readable skip message.
class TestSkip(NamedTuple):
    exit_code: int
    message: str


# Registry of well-known skip reasons; subprocesses exit with these codes and
# the parent maps them back to skip messages.
TEST_SKIPS = {
    "backend_unavailable": TestSkip(
        72, "Skipped because distributed backend is not available."
    ),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
    "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
    "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
    "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
    "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
    "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
    "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
    "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
    "generic": TestSkip(
        86, "Test skipped at subprocess level, look at subprocess log for skip reason"
    ),
    "importerror": TestSkip(88, "Test skipped due to missing import"),
}


@dataclass
class DistTestCases:
    # Backends that do not support a specific collective
    skip_collective = {}
    skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
    skip_collective["reduce"] = set()
    skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
    skip_collective["cpu barrier"] = {"nccl", "ucc"}

    # Sets showing that something is implemented
    backend_feature = {}
    backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
    backend_feature["cuda"] = {"nccl", "gloo", "ucc"}
    backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
    backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
    backend_feature["plugin"] = set()


def skip_if_no_gpu(func):
    """Skips if the world size exceeds the number of GPUs, ensuring that if the
    test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        world_size = int(os.environ["WORLD_SIZE"])
        if torch.cuda.device_count() < world_size:
            sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def skip_if_small_worldsize(func):
    # Skips (via exit code) when the world size is 2 or fewer on non-MPI backends.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def skip_if_odd_worldsize(func):
    # Skips (via exit code) when the world size is odd on non-MPI backends.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1:
            sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def require_n_gpus_for_nccl_backend(n, backend):
    # Requires n GPUs, but only when the backend under test is NCCL.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend == "nccl" and torch.cuda.device_count() < n:
                sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
            else:
                return func(*args, **kwargs)

        return wrapper

    return decorator


def import_transformers_or_skip():
    # Skips (via exit code) when HuggingFace transformers cannot be imported.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                from transformers import (  # noqa: F401
                    AutoModelForMaskedLM,
                    BertConfig,
                )

                return func(*args, **kwargs)
            except ImportError:
                sys.exit(TEST_SKIPS["importerror"].exit_code)

        return wrapper

    return decorator


def skip_if_lt_x_gpu(x):
    # Skips (via exit code) unless at least x CUDA devices are present.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return wrapper

    return decorator


# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend != "nccl":
                return func(*args, **kwargs)
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return wrapper

    return decorator


def verify_ddp_error_logged(model_DDP, err_substr):
    # Verify error was logged in ddp_logging_data.
    ddp_logging_data = model_DDP._get_ddp_logging_data()
    assert "iteration" in ddp_logging_data
    assert "has_error" in ddp_logging_data
    assert "error" in ddp_logging_data
    logging_err = ddp_logging_data["error"]
    # Remove C++ stacktrace if needed.
    actual = (
        err_substr
        if err_substr.find("\nException raised from ") == -1
        else err_substr.split("\nException raised from ")[0]
    )
    assert (
        actual in logging_err
    ), f"Did not find expected {actual} in ddp logging data error: {logging_err}"


def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
    TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING
        try:
            cached_nccl_async_error_handling: Union[str, None] = os.environ[
                "TORCH_NCCL_ASYNC_ERROR_HANDLING"
            ]
            del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"]
        except KeyError:
            # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset
            cached_nccl_async_error_handling = None

        # Save val of TORCH_NCCL_BLOCKING_WAIT and set it.
        try:
            cached_nccl_blocking_wait: Union[str, None] = os.environ[
                "TORCH_NCCL_BLOCKING_WAIT"
            ]
        except KeyError:
            cached_nccl_blocking_wait = None
        finally:
            os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"

        try:
            ret = func(*args, **kwargs)
            return ret
        finally:
            # restore old values.
            if cached_nccl_async_error_handling is not None:
                os.environ[
                    "TORCH_NCCL_ASYNC_ERROR_HANDLING"
                ] = cached_nccl_async_error_handling

            if cached_nccl_blocking_wait is not None:
                os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait

    return wrapper


def with_dist_debug_levels(levels):
    """
    Runs a test for each distributed debug level specified in levels.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
            for level in levels:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
                c10d.set_debug_level_from_env()
                ret = func(*args, **kwargs)
                c10d.barrier()
            if old_level is not None:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
            # Only returns test return for last test, but since these are
            # unittests the return value is not really used and earlier tests
            # would've raised had they failed.
            return ret

        return wrapper

    return decorator


def requires_gloo():
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_gloo_available(),
        "c10d was not compiled with the Gloo backend",
    )


def requires_nccl_version(version, msg):
    # Skips when NCCL is missing entirely or older than `version`.
    if not c10d.is_nccl_available():
        return skip_but_pass_in_sandcastle(
            "c10d was not compiled with the NCCL backend",
        )
    else:
        return skip_but_pass_in_sandcastle_if(
            torch.cuda.nccl.version() < version,
            f"Requires NCCL version greater than or equal to: {version}, found: {torch.cuda.nccl.version()}, reason: {msg}",
        )


def requires_nccl():
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_nccl_available(),
        "c10d was not compiled with the NCCL backend",
    )

def requires_ucc():
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_ucc_available(),
        "c10d was not compiled with the UCC backend",
    )

def requires_mpi():
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_mpi_available(),
        "c10d was not compiled with the MPI backend",
    )


def skip_if_rocm(func):
    """Skips
a test for ROCm""" + func.skip_if_rocm = True + + @wraps(func) + def wrapper(*args, **kwargs): + if not TEST_WITH_ROCM: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS["skipIfRocm"].exit_code) + + return wrapper + + +def skip_if_win32(): + return skip_but_pass_in_sandcastle_if( + sys.platform == "win32", + "This unit test case is not supported on Windows platform", + ) + + +@retry_on_connect_failures +def create_tcp_store( + addr="localhost", + world_size=1, + is_master=True, + timeout=timedelta(minutes=5), + wait_for_workers=True, + jit_class=False, + use_libuv=True, +): + """ + Creates a TCP store. Retries if the chosen port is already in use. + """ + port = find_free_port() + if jit_class: + timeout_millisecond = int(timeout / timedelta(milliseconds=1)) + return torch.classes.dist_c10d.TCPStore( + addr, port, world_size, is_master, timeout_millisecond + ) + else: + return c10d.TCPStore( + addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv + ) + + +if TEST_WITH_TSAN: + # TSAN runs much slower. 
    TIMEOUT_DEFAULT = 500
else:
    # Default per-test timeout (seconds); overridable via env var.
    TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300'))
# Per-test-name timeout overrides, keyed by the bare test method name.
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}


# https://github.com/pytorch/pytorch/issues/75665
if TEST_WITH_ROCM:
    TIMEOUT_OVERRIDE["test_join_kwargs"] = 200


def create_device(interface=None):
    # Gloo device helper: on Windows, or when no interface is named, bind to
    # loopback; otherwise bind to the given network interface.
    if sys.platform == "win32" or interface is None:
        return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
    else:
        return c10d.ProcessGroupGloo.create_device(interface=interface)


def get_timeout(test_id) -> int:
    # The last dotted component of a unittest id is the test method name,
    # which keys TIMEOUT_OVERRIDE.
    return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT)


@contextmanager
def captured_output():
    # Temporarily swap sys.stdout/sys.stderr for StringIO buffers and yield
    # them; originals are always restored on exit.
    new_out, new_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = old_out, old_err


def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
    """
    Generate a number of basic test cases for sparse reduction.
    These cover tensors with a varying number of sparse dimensions and a varying
    number of dense dimensions. The only reduction operation we support is sum.
    """

    def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
        # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):
        # Reference result: elementwise sum of every rank's tensor.
        return reduce(
            operator.add, [fn(rank, world_size) for rank in range(world_size)]
        )

    # Each entry pairs this rank's inputs with the expected all-reduce sums.
    return [
        (
            [
                fn(num_inputs * rank + i, num_inputs * world_size)
                for i in range(num_inputs)
            ],
            [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
        )
        for fn in [
            partial(generate, sparse_dims=1),
            partial(generate, sparse_dims=2),
            partial(generate, sparse_dims=3),
            partial(generate, dense_dims=1),
            partial(generate, dense_dims=2),
            partial(generate, dense_dims=3),
        ]
    ]


# HELPER FOR MULTIGPU TESTS
def init_multigpu_helper(world_size: int, backend: str):
    """Multigpu tests are designed to simulate the multi nodes with multi
    GPUs on each node. Nccl backend requires equal #GPUs in each process.
    On a single node, all visible GPUs are evenly
    divided to subsets, each process only uses a subset.
    """
    nGPUs = torch.cuda.device_count()
    visible_devices = range(nGPUs)

    # If rank is less than or equal to number of available GPU's
    # then each rank can be mapped to corresponding GPU.
    nGPUs_per_process = 1
    if world_size > nGPUs:
        # NOTE(review): integer division here yields 0 whenever
        # world_size > nGPUs, producing empty GPU lists for every rank —
        # confirm whether callers rely on that or whether the condition
        # was meant to be world_size < nGPUs.
        nGPUs_per_process = nGPUs // world_size
    rank_to_GPU = {
        i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process])
        for i in range(world_size)
    }
    return rank_to_GPU


# Module-global handle so cleanup_temp_dir() can tear down what
# initialize_temp_directories() created.
tmp_dir: Optional[tempfile.TemporaryDirectory] = None


def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    # Creates TEMP_DIR with barrier/, test_dir/ and init_dir/ subdirectories
    # and publishes TEMP_DIR / INIT_METHOD via environment variables.
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    os.environ["TEMP_DIR"] = tmp_dir.name
    os.mkdir(os.path.join(tmp_dir.name, "barrier"))
    os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
    init_dir_path = os.path.join(tmp_dir.name, "init_dir")
    os.mkdir(init_dir_path)
    # Set init method if specified.
    if init_method is not None:
        os.environ["INIT_METHOD"] = init_method
    else:
        os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
            init_dir_path, "shared_init_file"
        )


def cleanup_temp_dir() -> None:
    # Deletes the directory created by initialize_temp_directories(), if any.
    if tmp_dir is not None:
        tmp_dir.cleanup()


# Most tests operate with this worldsize
DEFAULT_WORLD_SIZE = 4

# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
# example which inherits from this class. Its `Setup()` methods calls into
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.


class MultiProcessTestCase(TestCase):
    # Sentinel rank for the parent (non-worker) process.
    MAIN_PROCESS_RANK = -1
    # This exit code is used to indicate that the test code had an error and
    # exited abnormally.
    # There are certain tests that might use sys.exit() to
    # simulate failures and in those cases, we can't have an exit code of 0,
    # but we still want to ensure we didn't run into any other errors.
    TEST_ERROR_EXIT_CODE = 10

    # do not early terminate for distributed tests.
    def _should_stop_test_suite(self) -> bool:
        return False

    @property
    def world_size(self) -> int:
        # Number of worker processes; subclasses may override.
        return DEFAULT_WORLD_SIZE

    def join_or_run(self, fn):
        # Wraps a test method so the main process joins its children while
        # worker processes run the actual test body.
        @wraps(fn)
        def wrapper(self):
            if self.rank == self.MAIN_PROCESS_RANK:
                self._join_processes(fn)
            else:
                fn()

        return types.MethodType(wrapper, self)

    # The main process spawns N subprocesses that run the test.
    # Constructor patches current instance test method to
    # assume the role of the main process and join its subprocesses,
    # or run the underlying test function.
    def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None:
        # methodName is the correct naming in unittest and testslide uses keyword arguments.
        # So we need to use both to 1) not break BC and, 2) support testslide.
        if methodName != "runTest":
            method_name = methodName
        super().__init__(method_name)
        fn = getattr(self, method_name)
        setattr(self, method_name, self.join_or_run(fn))

    def setUp(self) -> None:
        super().setUp()
        self.skip_return_code_checks = []  # type: ignore[var-annotated]
        self.processes = []  # type: ignore[var-annotated]
        self.rank = self.MAIN_PROCESS_RANK
        # Shared rendezvous file for the spawned workers.
        self.file_name = tempfile.NamedTemporaryFile(delete=False).name
        # pid to pipe consisting of error message from process.
        self.pid_to_pipe = {}  # type: ignore[var-annotated]

    def tearDown(self) -> None:
        super().tearDown()
        for p in self.processes:
            p.terminate()
        # Each Process instance holds a few open file descriptors. The unittest
        # runner creates a new TestCase instance for each test method and keeps
        # it alive until the end of the entire suite. We must thus reset the
        # processes to prevent an effective file descriptor leak.
        self.processes = []

    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]

    def _start_processes(self, proc) -> None:
        # Launches world_size worker processes; each gets a pipe back to the
        # parent for error/traceback reporting.
        self.processes = []
        for rank in range(int(self.world_size)):
            parent_conn, child_conn = torch.multiprocessing.Pipe()
            process = proc(
                target=self.__class__._run,
                name="process " + str(rank),
                args=(rank, self._current_test_name(), self.file_name, child_conn),
            )
            process.start()
            logger.info("Started process %s with pid %s", rank, process.pid)
            self.pid_to_pipe[process.pid] = parent_conn
            self.processes.append(process)

    def _spawn_processes(self) -> None:
        proc = torch.multiprocessing.get_context("spawn").Process
        self._start_processes(proc)

    class Event(Enum):
        # Control messages sent from parent to worker over the pipe.
        GET_TRACEBACK = 1

    @staticmethod
    def _event_listener(parent_pipe, signal_pipe, rank: int):
        # Runs in a daemon thread inside each worker: serves traceback
        # requests from the parent until the pipe closes or the worker
        # signals shutdown via signal_pipe.
        logger.info("Starting event listener thread for rank %s", rank)
        while True:
            ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])

            if parent_pipe in ready_pipes:

                if parent_pipe.closed:
                    logger.info(
                        "Pipe closed for process %s, stopping event listener thread", rank
                    )
                    return

                event = parent_pipe.recv()
                logger.info("Received event %s on process %s", event, rank)

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        parent_pipe.send(tmp_file.read())

                        logger.info("Process %s sent traceback", rank)

            if signal_pipe in ready_pipes:
                return

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
        # Worker-process entry point: rebuild the test case and run the test.
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe)

    def run_test(self, test_name: str, parent_pipe) -> None:
        # Start event listener thread.
        signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
        event_listener_thread = threading.Thread(
            target=MultiProcessTestCase._event_listener,
            args=(parent_pipe, signal_recv_pipe, self.rank),
            daemon=True,
        )
        event_listener_thread.start()
        if sys.platform != "win32" and sys.platform != "darwin":
            # Register signal handler to dump stack traces on FATALs.
            # Windows and MacOS do not support the signal handlers.
            torch._C._set_print_stack_traces_on_fatal_signal(True)
        # Show full C++ stacktraces when a Python error originating from C++ is raised.
        os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"

        # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving a corresponding test and executing it.
        try:
            getattr(self, test_name)()
        except unittest.SkipTest as se:
            logger.info(
                "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
            )
            sys.exit(TEST_SKIPS["generic"].exit_code)
        except Exception as e:
            logger.error(
                "Caught exception: \n%s exiting "
                "process %s with exit code: %s",
                traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE
            )
            # Send error to parent process.
            parent_pipe.send(traceback.format_exc())
            sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
        finally:
            if signal_send_pipe is not None:
                signal_send_pipe.send(None)

            assert event_listener_thread is not None
            event_listener_thread.join()
            # Close pipe after done with test.
            parent_pipe.close()

    def _get_timedout_process_traceback(self) -> None:
        # Asks every still-running worker for its traceback and logs it.
        pipes = []
        for i, process in enumerate(self.processes):
            if process.exitcode is None:
                pipe = self.pid_to_pipe[process.pid]
                try:
                    pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                    pipes.append((i, pipe))
                except ConnectionError as e:
                    logger.error(
                        "Encountered error while trying to get traceback for process %s: %s", i, e
                    )

        # Wait for results.
        for rank, pipe in pipes:
            try:
                # Wait for traceback
                if pipe.poll(5):
                    if pipe.closed:
                        logger.info(
                            "Pipe closed for process %s, cannot retrieve traceback", rank
                        )
                        continue

                    # NOTE(review): this local shadows the imported
                    # `traceback` module within this method.
                    traceback = pipe.recv()
                    logger.error(
                        "Process %s timed out with traceback: \n\n%s", rank, traceback
                    )
                else:
                    logger.error(
                        "Could not retrieve traceback for timed out process: %s", rank
                    )
            except ConnectionError as e:
                logger.error(
                    "Encountered error while trying to get traceback for process %s: %s", rank, e
                )

    def _join_processes(self, fn) -> None:
        # Parent-side loop: poll workers until they all exit, one errors, or
        # the timeout elapses; then validate exit codes.
        timeout = get_timeout(self.id())
        start_time = time.time()
        subprocess_error = False
        try:
            while True:
                # check to see if any subprocess exited with an error early.
                for (i, p) in enumerate(self.processes):
                    # This is the exit code processes exit with if they
                    # encountered an exception.
                    if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
                        print(
                            f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
                        )
                        active_children = torch.multiprocessing.active_children()
                        for ac in active_children:
                            ac.terminate()
                        subprocess_error = True
                        break
                if subprocess_error:
                    break
                # All processes have joined cleanly if they all a valid exitcode
                if all(p.exitcode is not None for p in self.processes):
                    break
                # Check if we should time out the test. If so, we terminate each process.
                elapsed = time.time() - start_time
                if elapsed > timeout:
                    self._get_timedout_process_traceback()
                    print(
                        f"Timing out after {timeout} seconds and killing subprocesses."
                    )
                    for p in self.processes:
                        p.terminate()
                    break
                # Sleep to avoid excessive busy polling.
                time.sleep(0.1)

            elapsed_time = time.time() - start_time

            if fn in self.skip_return_code_checks:
                self._check_no_test_errors(elapsed_time)
            else:
                self._check_return_codes(elapsed_time)
        finally:
            # Close all pipes
            for pipe in self.pid_to_pipe.values():
                pipe.close()

    def _check_no_test_errors(self, elapsed_time) -> None:
        """
        Checks that we didn't have any errors thrown in the child processes.
        """
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} timed out after {elapsed_time} seconds"
                )
            self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)

    def _check_return_codes(self, elapsed_time) -> None:
        """
        Checks that the return codes of all spawned processes match, and skips
        tests if they returned a return code indicating a skipping condition.
        """
        # If no processes are spawned, there is nothing to check.
        if not self.processes:
            logger.warning("Note: no subprocesses were spawned, test was likely skipped.")
            return

        first_process = self.processes[0]
        # first, we check if there are errors in actual processes
        # (via TEST_ERROR_EXIT CODE), and raise an exception for those.
        # the reason we do this is to attempt to raise a more helpful error
        # message than "Process x terminated/timed out"
        # TODO: we should pipe the exception of the failed subprocess here.
        # Currently, the actual exception is displayed as a logging output.
        errored_processes = [
            (i, p)
            for i, p in enumerate(self.processes)
            if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
        ]
        if errored_processes:
            error = ""
            for i, process in errored_processes:
                # Get error from pipe.
                error_message = self.pid_to_pipe[process.pid].recv()
                error += (
                    f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} "
                    f"and exception:\n{error_message}\n"
                )

            raise RuntimeError(error)
        # If no process exited uncleanly, we check for timeouts, and then ensure
        # each process exited cleanly.
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} terminated or timed out after {elapsed_time} seconds"
                )
            self.assertEqual(
                p.exitcode,
                first_process.exitcode,
                msg=f"Expect process {i} exit code to match Process 0 exit code of {first_process.exitcode}, but got {p.exitcode}",
            )
        for skip in TEST_SKIPS.values():
            if first_process.exitcode == skip.exit_code:
                if IS_SANDCASTLE:
                    # Don't use unittest.skip to skip the test on sandcastle
                    # since it creates tasks for skipped tests assuming there
                    # is some follow-up needed. Instead just "pass" the test
                    # with an appropriate message.
                    logger.info(
                        "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
                    )
                    return
                else:
                    raise unittest.SkipTest(skip.message)
        self.assertEqual(
            first_process.exitcode,
            0,
            msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
        )

    @property
    def is_master(self) -> bool:
        return self.rank == 0


def run_subtests(
    cls_inst,
    subtest_config: Dict[str, List[Any]],
    test_fn: Callable,
    *test_args,
    **test_kwargs: Any,
):
    """
    Runs a test function given by ``test_fn`` as a subtest according to the
    configurations specified by ``subtest_config``. This amortizes the
    costly setup overhead (including process spawn and initializing the
    process group) over the subtests.

    Args:
        subtest_config (Dict[str, List[Any]]): A mapping from subtest
            keyword argument name to a list of its possible values.
        test_fn (Callable): A callable that runs the actual test.
        test_args: Positional arguments to pass to ``test_fn``.
        test_kwargs: Keyword arguments to pass to ``test_fn``.
    """
    # Convert the config mapping to a list to have a fixed order
    subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
    subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
    subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
    # Cartesian product over the per-key value lists: one subtest per combo.
    for values in itertools.product(*subtest_config_values):
        # Map keyword to chosen value
        subtest_kwargs = dict(zip(subtest_config_keys, values))
        with cls_inst.subTest(**subtest_kwargs):
            # Reset dynamo state around each subtest so compilations don't
            # bleed between configurations.
            torch._dynamo.reset()
            test_fn(*test_args, **test_kwargs, **subtest_kwargs)
            torch._dynamo.reset()
    c10d.barrier()


# Cannot use functools.cache as it requires python 3.9
EFA_PROBE_RESULT = None


def has_efa() -> bool:
    """
    If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
    Libfabric EFA interfaces and EFA software components installed,
    see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
    """
    global EFA_PROBE_RESULT
    # Memoized: the probe result is cached for the process lifetime.
    if EFA_PROBE_RESULT is not None:
        return EFA_PROBE_RESULT

    try:
        EFA_PROBE_RESULT = (
            subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0
        )
    except FileNotFoundError:
        # fi_info binary not installed at all.
        EFA_PROBE_RESULT = False
    return EFA_PROBE_RESULT


def tp_transports():
    """
    If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
    'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
    uses InfiniBand transport, so we exclude it from tensorpipe transports,
    see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
    """
    return ["shm", "uv"] if has_efa() else None


def spawn_threads_and_init_comms(
    func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
):
    """
    Wrapper to use with a test method
    """
    # Support both bare @spawn_threads_and_init_comms and the parameterized
    # @spawn_threads_and_init_comms(timeout=..., world_size=...) forms.
    if func is None:
        return partial(
            spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
        )


    def _run_test_method_with_multi_threads(world_size, callback):
        # Installs a threaded process group and runs `callback` on
        # world_size threads, each joined to a shared HashStore.
        world = _install_threaded_pg()
        global_store = c10d.HashStore()

        def world_is_valid():
            return world == c10d.distributed_c10d._world

        def worker(rank, world_pg, store):
            c10d.init_process_group(
                backend="threaded", rank=rank, world_size=world_size, store=store
            )
            try:
                callback()
            except BaseException as ex:
                # Exceptions are handled in MultiThreadedTestCase
                MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
                ProcessLocalGroup.exception_handle(ex)  # trigger _terminate event and awaken worker threads
            finally:
                if world_is_valid():
                    c10d.destroy_process_group()

        threads = []
        for rank in range(world_size):
            t = threading.Thread(target=worker, args=(rank, world, global_store))
            t.start()
            threads.append(t)

        return threads


    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # TODO: get test name from kwargs
        torch._C._distributed_c10d._set_thread_isolation_mode(True)
        try:
            threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs))
            # join and error handling
            MultiThreadedTestCase._join_threads(threads, func)
        finally:
            torch._C._distributed_c10d._set_thread_isolation_mode(False)

    return wrapper


class MultiThreadedTestCase(TestCase):
    """
    Test runner that runs all tests with the in-proc process group using
    multiple threads with the threaded process group.

    Each test spawns world_size threads and run the test method in each thread.

    Difference from regular MultiProcess test runner:
    Must explicitly defines SetUp and call self._spawn_threads() to run the tests.
    Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown)
        to set up / tear down each thread when running each test.
    No global state possible
        How bad of a limitation is this?
    """
    # Shared queue collecting (rank, exc_info) tuples from worker threads.
    exception_queue = queue.Queue()

    # Sentinel rank for the main (non-worker) thread.
    MAIN_THREAD_RANK = -1

    def join_or_run(self, fn):
        # Wraps a test method so the main thread joins workers while worker
        # threads run the actual test body.
        @wraps(fn)
        def wrapper(self):
            if self.rank == self.MAIN_THREAD_RANK:
                self._join_threads(self.threads, fn)
            else:
                fn()

        return types.MethodType(wrapper, self)

    def __init__(self, method_name: str = "runTest") -> None:
        super().__init__(method_name)
        test_fn = getattr(self, method_name, None)
        setattr(self, method_name, self.join_or_run(test_fn))

    def perThreadSetUp(self):
        # super().setUp()  # TestCase.setUp() calls torch.manual_seed()
        pass

    def perThreadTearDown(self):
        pass

    def setUp(self) -> None:
        """
        setUp only set up things in the main thread, if you want to configure things
        in the spawned threads, use perThreadSetUp
        """
        super().setUp()
        self.rank = self.MAIN_THREAD_RANK
        self.threads = []
        # Show full C++ stacktraces when a Python error originating from C++ is raised.
        os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"

    def tearDown(self):
        """
        tearDown only set up things in the main thread, if you want to configure things
        in the spawned threads, use perThreadTearDown
        """
        super().tearDown()
        self.threads = []

    def _spawn_threads(self):
        """
        class method to spawn threads and run test, use this method in the SetUp of your TestCase
        """
        torch._C._distributed_c10d._set_thread_isolation_mode(True)
        test_name = self._current_test_name
        # for each test case, we need to create thread local world, and a global store
        world = _install_threaded_pg()
        self.__class__.global_store = c10d.HashStore()

        def world_is_valid():
            return world == c10d.distributed_c10d._world

        if not world_is_valid():
            raise RuntimeError("Invalid world")

        for rank in range(self.world_size):
            t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size))
            t.start()
            self.threads.append(t)

    @classmethod
    def _run(cls, test_name, rank, world_size):
        # Worker-thread entry point: rebuild the test case for this rank.
        self = cls(test_name)
        self.rank = rank

        # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make
        # every thread have the same value. This would be relevant when we use op db tests, where it
        # needs those states to be set i.e. using instantiate_device_type_tests()
        # TODO: figure out a better way to do this
        if hasattr(self, "_tls"):
            self._tls = threading.local()
            self._tls.precision = TestCase._precision
            self._tls.rel_tol = TestCase._rel_tol

        self.run_test_with_threaded_pg(test_name, rank, world_size)

    def run_test_with_threaded_pg(self, test_name, rank, world_size):
        """
        Run the current test associated with `test_name` using the threaded process group.
        """
        c10d.init_process_group(
            backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store
        )
        self.perThreadSetUp()

        try:
            getattr(self, test_name)()
        except BaseException as ex:
            # Report into the shared queue; main thread re-raises/skips.
            self.exception_queue.put((rank, sys.exc_info()))
            ProcessLocalGroup.exception_handle(ex)  # trigger _terminate event and awaken worker threads
        finally:
            c10d.destroy_process_group()
            self.perThreadTearDown()


    @classmethod
    def _join_threads(cls, threads, fn):
        # Joins worker threads (recording a TimeoutError for stragglers),
        # drains the exception queue, then validates the outcomes.
        timeout = TIMEOUT_DEFAULT
        try:
            for idx, thread in enumerate(threads):
                thread.join(max(0, timeout))
                if thread.is_alive():
                    MultiThreadedTestCase.exception_queue.put(
                        (
                            idx,
                            (
                                TimeoutError,
                                TimeoutError(
                                    f"Rank failed to join in under {timeout} seconds"
                                ),
                                None,
                            ),
                        )
                    )
            ProcessLocalGroup.reset()
            failed_ranks = []
            while not cls.exception_queue.empty():
                failure = cls.exception_queue.get()
                failed_ranks.append(failure)
        finally:
            _uninstall_threaded_pg()
            torch._C._distributed_c10d._set_thread_isolation_mode(False)

        cls._check_return_codes(failed_ranks, timeout, fn)

    @classmethod
    def _check_return_codes(cls, failed_ranks, timeout, fn):
        # Print based on exceptions raised from threads
        #   SkipTest: print info for each thread
        #   TimeoutError: raise RuntimeError for any timed out thread
        #   Normal Exception: print error for each thread that raises exception
        #   and raise a RuntimeError
        error_msg = ""
        skip_code = -1
        for rank, exc_info in failed_ranks:
            exc = exc_info[1]
            if isinstance(exc, unittest.SkipTest):
                logger.info(
                    "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
                )
                if skip_code < 0:
                    skip_code = TEST_SKIPS["generic"].exit_code
            elif isinstance(exc, TimeoutError):
                msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
                logger.error(msg)
                raise RuntimeError(msg)
            elif isinstance(exc, Exception):
                msg = "".join(traceback.format_exception(*exc_info))
                logger.error(
                    "Caught exception: \n%s exiting thread %s", msg, rank
                )
                error_msg += (
                    f"Thread {rank} exited with exception:\n{msg}\n"
                )
            elif isinstance(exc, SystemExit):
                # Workers signal skips via sys.exit(exit_code).
                if type(exc.code) == int and skip_code < 0:
                    skip_code = exc.code

        # check exceptions
        if len(error_msg) > 0:
            raise RuntimeError(error_msg)
        # check skip
        if skip_code > 0:
            for skip in TEST_SKIPS.values():
                if skip_code == skip.exit_code:
                    if IS_SANDCASTLE:
                        # "pass" the test with an appropriate message.
                        logger.info(
                            "Skipping %s on sandcastle for the following reason: %s", fn, skip.message
                        )
                        return
                    else:
                        raise unittest.SkipTest(skip.message)

    @property
    def world_size(self) -> int:
        return DEFAULT_WORLD_SIZE

    @property
    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]

    def assertEqualOnRank(self, x, y, msg=None, *, rank=0):
        """
        The reason why we have this util function instead of
        self.assertEqual is all threads are sharing one CPU RNG
        so the assertion result is only reliable on rank 0
        """
        if self.rank == rank:
            self.assertEqual(x, y, msg)

    def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0):
        # Counterpart of assertEqualOnRank; only asserts on the given rank.
        if self.rank == rank:
            self.assertNotEqual(x, y)


class SaveForwardInputsModule(nn.Module):
    # Linear module that records its forward input into a shared dict,
    # optionally casting the input to the weight dtype first.
    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.l = nn.Linear(100, 100)
        self.forward_inputs = forward_inputs
        self.cast_forward_inputs = cast_forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.forward_inputs[self] = x
        return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x)


class SaveForwardInputsModel(nn.Module):
    # Two chained SaveForwardInputsModule instances sharing one input dict.
    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.forward_inputs = forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.forward_inputs[self] = x
        return self.c2(self.c1(x))

@contextmanager
def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True):
    # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase,
    # Just manually implement the most important part of the dynamo behavior to reset/clear.
@contextmanager
def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True):
    # To avoid multiple inheritance from _dynamo.test_case.TestCase and
    # MultiProcessTestCase, manually reproduce the essential dynamo
    # reset/clear behavior around the wrapped test body.
    torch.cuda.set_device(rank)
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '6789'
    if init_pg:
        c10d.init_process_group("nccl", rank=rank, world_size=world_size)
    torch._dynamo.reset()
    torch._dynamo.utils.counters.clear()
    try:
        yield
    finally:
        # Always clear dynamo state again, then tear down the process group.
        torch._dynamo.reset()
        torch._dynamo.utils.counters.clear()
        if init_pg:
            c10d.destroy_process_group()


class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase):
    """
    Test harness for single-process dynamo distributed tests,
    initializes dist process group.

    Prefer this for simple tests, as it's easier to debug.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # _exit_stack is set up in TestCase; it restores the env vars on exit.
        cls._exit_stack.enter_context(
            patch.dict(
                os.environ,
                {
                    "MASTER_ADDR": "localhost",
                    "MASTER_PORT": "12355",
                },
            )
        )
        cls.rank = 0
        cls.device = f"cuda:{cls.rank}"
        cls.device_ids = None if "cuda" in cls.device else [cls.rank]
        c10d.init_process_group("nccl", rank=cls.rank, world_size=1)

    @classmethod
    def tearDownClass(cls):
        c10d.destroy_process_group()
        super().tearDownClass()
+ """ + def setUp(self): + super().setUp() + self._spawn_processes() + + def tearDown(self): + super().tearDown() + try: + os.remove(self.file_name) + except OSError: + pass + + @property + def world_size(self) -> int: + return torch.cuda.device_count() + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: + # The rest is copypasta from MultiProcessTestCase._run + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + +class MultiProcContinousTest(TestCase): + # Class variables: + # number of test processes + world_size: int = 2 + # rank of the current process + rank: int = -1 # unset state + # Rendezvous file + rdvz_file: Optional[str] = None + + @classmethod + @abc.abstractmethod + def backend_str(cls) -> str: + """ + ProcessGroup backend str. + To be customized by sub test classes, e.g. "nccl". + Here we raise error. + """ + raise NotImplementedError("Please implement backend_str in your test class") + + @classmethod + def opts(cls, high_priority_stream=False): + """ + ProcessGroup init options. + To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest + Here we return None. + """ + return None + + @classmethod + def setUpClass(cls): + """ + Class-scope test fixture. Run once for entire test class, before any test starts. + Set up the process group. + """ + super().setUpClass() + if not 0 <= cls.rank < cls.world_size: + raise RuntimeError( + "Rank must be set and in the range of 0 to world_size. 
" + f"World size: {cls.world_size} Rank: {cls.rank}" + ) + if cls.rdvz_file: + store = c10d.FileStore(cls.rdvz_file, cls.world_size) + else: + # torchrun takes care of rendezvous + store = None + opts = cls.opts() + backend = cls.backend_str() + print(f"Testing {backend=}") + # create nccl processgroup with opts + c10d.init_process_group( + backend=backend, + world_size=cls.world_size, + rank=cls.rank, + store=store, + pg_options=opts, + ) + cls.pg = c10d.distributed_c10d._get_default_group() + print(f"Rank {cls.rank} setup complete") + + @classmethod + def tearDownClass(cls): + """ + Class-scope test fixture. Run once for entire test class, after all tests finish. + Tear down the process group. + """ + c10d.destroy_process_group() + super().tearDownClass() + # Clear up the rendezvous file + if cls.rdvz_file: + try: + os.remove(cls.rdvz_file) + except OSError: + pass + print(f"Rank {cls.rank} teardown complete") + + @classmethod + def run_rank( + cls, + rank: int, + world_size: int, + rdvz_file: Optional[str] = None, + ): + """ + This is an entry point for each rank to run the tests in `MultiProcContinousTest`. + In this entry point, we set the class variables for the test class. + Then we run all tests. + + Note: + - This helper only works for a subclass of `MultiProcContinousTest`. + + Example: + - See `test_c10d_ops_nccl.py`. 
+ """ + # set class variables for the test class + cls.rank = rank + cls.world_size = world_size + cls.rdvz_file = rdvz_file + # Launch tests via `common_utils` infra + run_tests() diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..4c3f8484ed31097fda2e7058fe73866e2cd4ff96 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py @@ -0,0 +1,134 @@ +# mypy: ignore-errors + +from typing import List + +import torch + + +# Functions and classes for describing the dtypes a function supports +# NOTE: these helpers should correspond to PyTorch's C++ dispatch macros + +# Verifies each given dtype is a torch.dtype +def _validate_dtypes(*dtypes): + for dtype in dtypes: + assert isinstance(dtype, torch.dtype) + return dtypes + +# class for tuples corresponding to a PyTorch dispatch macro +class _dispatch_dtypes(tuple): + def __add__(self, other): + assert isinstance(other, tuple) + return _dispatch_dtypes(tuple.__add__(self, other)) + +_empty_types = _dispatch_dtypes(()) +def empty_types(): + return _empty_types + +_floating_types = _dispatch_dtypes((torch.float32, torch.float64)) +def floating_types(): + return _floating_types + +_floating_types_and_half = _floating_types + (torch.half,) +def floating_types_and_half(): + return _floating_types_and_half + +def floating_types_and(*dtypes): + return _floating_types + _validate_dtypes(*dtypes) + +_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble) +def floating_and_complex_types(): + return _floating_and_complex_types + +def floating_and_complex_types_and(*dtypes): + return _floating_and_complex_types + _validate_dtypes(*dtypes) + +_double_types = _dispatch_dtypes((torch.float64, torch.complex128)) +def double_types(): + return _double_types + +# NB: Does not contain 
# mypy: ignore-errors

from typing import List

import torch


# Functions and classes for describing the dtypes a function supports
# NOTE: these helpers should correspond to PyTorch's C++ dispatch macros

def _validate_dtypes(*dtypes):
    """Assert every argument is a ``torch.dtype`` and return them unchanged."""
    for dtype in dtypes:
        assert isinstance(dtype, torch.dtype)
    return dtypes


class _dispatch_dtypes(tuple):
    """Tuple subclass for dtype collections mirroring a PyTorch dispatch macro;
    ``+`` keeps the result a ``_dispatch_dtypes``."""

    def __add__(self, other):
        assert isinstance(other, tuple)
        return _dispatch_dtypes(tuple.__add__(self, other))


_empty_types = _dispatch_dtypes(())
def empty_types():
    return _empty_types

_floating_types = _dispatch_dtypes((torch.float32, torch.float64))
def floating_types():
    return _floating_types

_floating_types_and_half = _floating_types + (torch.half,)
def floating_types_and_half():
    return _floating_types_and_half

def floating_types_and(*dtypes):
    return _floating_types + _validate_dtypes(*dtypes)

_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble)
def floating_and_complex_types():
    return _floating_and_complex_types

def floating_and_complex_types_and(*dtypes):
    return _floating_and_complex_types + _validate_dtypes(*dtypes)

_double_types = _dispatch_dtypes((torch.float64, torch.complex128))
def double_types():
    return _double_types

# NB: Does not contain uint16/uint32/uint64 for BC reasons
_integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64))
def integral_types():
    return _integral_types

def integral_types_and(*dtypes):
    return _integral_types + _validate_dtypes(*dtypes)

_all_types = _floating_types + _integral_types
def all_types():
    return _all_types

def all_types_and(*dtypes):
    return _all_types + _validate_dtypes(*dtypes)

_complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble))
def complex_types():
    return _complex_types

def complex_types_and(*dtypes):
    return _complex_types + _validate_dtypes(*dtypes)

_all_types_and_complex = _all_types + _complex_types
def all_types_and_complex():
    return _all_types_and_complex

def all_types_and_complex_and(*dtypes):
    return _all_types_and_complex + _validate_dtypes(*dtypes)

_all_types_and_half = _all_types + (torch.half,)
def all_types_and_half():
    return _all_types_and_half

def custom_types(*dtypes):
    """Create a list of arbitrary dtypes"""
    return _empty_types + _validate_dtypes(*dtypes)

# The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro

# See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
def get_all_dtypes(include_half=True,
                   include_bfloat16=True,
                   include_bool=True,
                   include_complex=True,
                   include_complex32=False,
                   include_qint=False,
                   ) -> List[torch.dtype]:
    """Return int + float (+ optional bool/complex/qint) dtypes as a list."""
    dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16)
    if include_bool:
        dtypes.append(torch.bool)
    if include_complex:
        dtypes += get_all_complex_dtypes(include_complex32)
    if include_qint:
        dtypes += get_all_qint_dtypes()
    return dtypes

def get_all_math_dtypes(device) -> List[torch.dtype]:
    """Return the int + float + complex dtypes usable for math on ``device``.

    ``device`` may be a device string (e.g. ``"cuda:0"``) or a ``torch.device``;
    float16 is included only for CUDA devices. (Previously this called
    ``device.startswith`` and therefore rejected ``torch.device`` arguments.)
    """
    device_type = torch.device(device).type
    return (
        get_all_int_dtypes()
        + get_all_fp_dtypes(include_half=device_type == "cuda", include_bfloat16=False)
        + get_all_complex_dtypes()
    )

def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]:
    return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128]


def get_all_int_dtypes() -> List[torch.dtype]:
    return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]


def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
    dtypes = [torch.float32, torch.float64]
    if include_half:
        dtypes.append(torch.float16)
    if include_bfloat16:
        dtypes.append(torch.bfloat16)
    return dtypes


def get_all_qint_dtypes() -> List[torch.dtype]:
    return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]


# Maps a float dtype to the complex dtype with matching component precision.
float_to_corresponding_complex_type_map = {
    torch.float16: torch.complex32,
    torch.float32: torch.complex64,
    torch.float64: torch.complex128,
}
["oncall: distributed"] + +import contextlib +import os +import re +import sys +import warnings +from abc import ABC, abstractmethod +from contextlib import nullcontext +from copy import deepcopy +from enum import auto, Enum +from functools import wraps +from typing import ( + Any, + Callable, + Dict, + List, + no_type_check, + Optional, + Tuple, + Type, + Union, +) +from unittest import mock + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.distributed._composable import checkpoint +from torch.distributed._composable.fsdp import fully_shard +from torch.distributed._composable.fsdp._fsdp_param_group import ( + FSDPParamGroup, + RegisterPostBackwardFunction, +) +from torch.distributed._tensor import distribute_tensor, DTensor, Shard +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP +from torch.distributed.fsdp._common_utils import TrainingState +from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + BackwardPrefetch, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler +from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + parallelize_module, + RowwiseParallel, + SequenceParallel, +) +from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer +from torch.nn.parallel.distributed import DistributedDataParallel as DDP +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + run_subtests, + TEST_SKIPS, +) +from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms +from torch.utils._triton import has_triton + + +class FSDPInitMode(Enum): + # No FSDP wrapping + NO_FSDP = auto() + # FSDP 
class FSDPInitMode(Enum):
    """How (whether) the test model is wrapped with FSDP at construction."""

    # No FSDP wrapping
    NO_FSDP = auto()
    # FSDP recursive wrapping
    RECURSIVE = auto()
    # TODO: FSDP non-recursive wrapping
    # NONRECURSIVE = auto()


class CUDAInitMode(Enum):
    """When the test model is moved to CUDA relative to FSDP construction."""

    # Move model to CUDA before passing to the FSDP constructor
    CUDA_BEFORE = auto()
    # Move model to CUDA after passing to the FSDP constructor
    CUDA_AFTER = auto()
    # Keep on CPU
    CUDA_NEVER = auto()


class FSDPTestModel(nn.Module, ABC):
    """This defines the interface expected from all models used commonly for
    FSDP unit tests."""

    @abstractmethod
    def get_input(self, device) -> Tuple[torch.Tensor, ...]:
        """Returns an input for the model as a tuple."""
        ...

    @abstractmethod
    def get_loss(self, input, output) -> torch.Tensor:
        """Returns the loss given the input and output."""
        ...

    @abstractmethod
    def run_backward(self, loss) -> None:
        """Runs the backward pass (e.g. including ``loss.backward()``)."""
        ...

    @staticmethod
    @abstractmethod
    def init(*args: Any, **kwargs: Any) -> nn.Module:
        """Initializes an instance of this model."""
        ...
+ """ + # Include names for debugging convenience + named_module_states = [ + (param_name, param.detach().cpu()) + for param_name, param in model.named_parameters() + ] + named_module_states += [ + (buffer_name, buffer.detach().cpu()) + for buffer_name, buffer in model.named_buffers() + ] + world_size = dist.get_world_size(process_group) + olist = [None for _ in range(world_size)] + dist.all_gather_object(olist, named_module_states, group=process_group) + rank0_states = olist[0] + assert rank0_states is not None # mypy + for state in olist[1:]: + assert state is not None # mypy + for (_, p1), (_, p2) in zip(rank0_states, state): + assert_fn(p1, p2) + + +def _zero_model( + model: nn.Module, + zero_buffers: bool = False, + summon_full=True, +): + """Zeros the parameters and optionally buffers of ``model`` in place.""" + ctx = FSDP.summon_full_params(model) if summon_full else nullcontext() + with ctx: + for param in model.parameters(): + with torch.no_grad(): + param.zero_() + if zero_buffers: + for buffer in model.buffers(): + with torch.no_grad(): + buffer.zero_() + + +def _get_state_dict(model, cpu_offload=False, half=False): + if not cpu_offload: + model = model.cuda() + if half: + model.half() + + return model.state_dict() + + +def subtest_name(test_name_mapping, *args): + return "_".join( + [test_name_mapping[str(s)] if s is not None else "none" for s in args] + ) + + +def _broadcast_state_dict(rank, state_dict): + # For non-FSDP roots, some parts of the model state on rank 0 may + # not be on CPU, so we move everything to CPU to avoid issues like: + # https://github.com/pytorch/pytorch/issues/77113. 
+ for param_name, param in state_dict.items(): + if param.device != torch.device("cpu"): + state_dict[param_name] = param.cpu() + + olist = [state_dict if rank == 0 else None] + dist.broadcast_object_list(olist) + state_dict = olist[0] + # Ensure that the state is on CUDA + for param_name in state_dict.keys(): + state_dict[param_name] = state_dict[param_name].cuda() + return state_dict + + +def get_full_params(model: nn.Module, recurse: bool = True): + """ + Returns the full unsharded parameters of ``model``. Any FSDP-managed + parameters offloaded to CPU are moved to GPU in the returned list. + + Args: + recurse (bool): If ``False``, only unshards the parameters immediate to + ``model``; if ``True``, recurses through the module hierarchy + rooted at ``model``. + """ + with FSDP.summon_full_params(model, recurse=recurse): + return deepcopy(list(model.parameters())) + + +def _maybe_cuda(model: nn.Module, move_to_cuda: bool): + return model.cuda() if move_to_cuda else model + + +def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs): + return model if not wrap_fsdp else FSDP(model, *args, **kwargs) + + +class DummyProcessGroup: + def __init__(self, rank: int, size: int): + self._rank = rank + self._size = size + + def rank(self) -> int: + return self._rank + + def size(self) -> int: + return self._size + + def allreduce(self, *args, **kwargs): + dist_wait = mock.Mock() + + def get_future(): + future: torch.futures.Future = torch.futures.Future() + future.set_result(1) + return future + + dist_wait.get_future = get_future + return dist_wait + + +class TransformerWithSharedParams(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + cuda_init_mode: CUDAInitMode, + add_bn: bool, + deterministic: bool, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + if deterministic: + torch.manual_seed(0) + d_vocab = 23 + d_model = 16 + + self.embed_tokens = nn.Embedding(d_vocab, d_model) + self.transformer = 
class TransformerWithSharedParams(FSDPTestModel):
    """Small transformer whose embedding and output projection share weights,
    with extra buffers (one of them integer-typed) to exercise FSDP's buffer
    handling, and an optional BatchNorm over the batch dimension."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        cuda_init_mode: CUDAInitMode,
        add_bn: bool,
        deterministic: bool,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        if deterministic:
            torch.manual_seed(0)
        d_vocab = 23
        d_model = 16

        self.embed_tokens = nn.Embedding(d_vocab, d_model)
        self.transformer = nn.Transformer(
            d_model=d_model,
            num_encoder_layers=2,
            num_decoder_layers=2,
            dim_feedforward=8,
            dropout=0.1,
        )
        self.output_proj = nn.Linear(d_model, d_vocab)

        # share the embedding and output projection weights
        self.output_proj.weight = self.embed_tokens.weight
        self.register_buffer(
            "vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
        )
        self.register_buffer(
            "long_buffer",
            torch.zeros_like(self.vocab_bias, dtype=torch.long),
        )  # type: ignore[arg-type]

        self.bs = 2
        self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
        if cuda_init_mode == CUDAInitMode.CUDA_BEFORE:
            self = self.cuda()
        if deterministic:
            self.eval()

    def get_input(self, device):
        torch.manual_seed(1 + self.rank)  # keep everything deterministic
        src = torch.arange(12, device=device).view(6, self.bs)  # T x B
        tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs)  # T x B
        return (src, tgt)

    def forward(self, src_ids, tgt_ids):
        src = self.embed_tokens(src_ids)
        src = src + self.vocab_bias + self.long_buffer.type_as(src)  # type: ignore[operator]
        tgt = self.bn(self.embed_tokens(tgt_ids))
        return self.output_proj(self.transformer(src, tgt))

    def get_loss(self, input, output):
        _, tgt = input
        return nn.functional.cross_entropy(
            output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
        )

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        add_bn: bool = True,
    ) -> Union[nn.Module, FSDP]:
        """
        Initializes a :class:`TransformerWithSharedParams` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps with
                top-level FSDP. By default, the top-level FSDP uses the
                ``ModuleWrapPolicy`` for encoder and decoder layers, but a
                different auto wrap policy may be specified via
                ``fsdp_kwargs``.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            add_bn (bool): Whether to include batch norm in the model.
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            pg = group[0] if isinstance(group, tuple) else group
            return TransformerWithSharedParams(
                pg, cuda_init_mode, add_bn, deterministic
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Default to the `ModuleWrapPolicy`
            if "auto_wrap_policy" not in fsdp_kwargs:
                auto_wrap_policy = ModuleWrapPolicy(
                    {TransformerEncoderLayer, TransformerDecoderLayer}
                )
            else:
                auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy")

            # Hybrid sharding with a plain (non-tuple) group derives its own
            # process groups, so pass None to the FSDP constructor.
            use_default_pg = (
                "sharding_strategy" in fsdp_kwargs
                and fsdp_kwargs["sharding_strategy"]
                in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2}
                and not isinstance(group, tuple)
            )
            fsdp_pg = None if use_default_pg else group
            tformer_pg = group[0] if isinstance(group, tuple) else group

            m = TransformerWithSharedParams(
                tformer_pg, cuda_init_mode, add_bn, deterministic
            )
            fsdp_model = FSDP(
                m,
                fsdp_pg,
                auto_wrap_policy=auto_wrap_policy,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")

    def get_ignored_modules(self):
        return [self.transformer]
class NestedWrappedModule(FSDPTestModel):
    """Sequential model whose inner modules are optionally wrapped in FSDP,
    producing a nested FSDP hierarchy without a top-level wrapper."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        cuda_init_mode: CUDAInitMode,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE

        def _maybe_wrap(layer):
            return FSDP(layer, group, **fsdp_kwargs) if wrap_fsdp else layer

        if deterministic:
            torch.manual_seed(0)
        self.module = nn.Sequential(
            _maybe_cuda(nn.Linear(8, 4), move_to_cuda),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
                    _maybe_cuda(nn.Linear(16, 16), move_to_cuda),
                ),
            ),
            _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)),
            _maybe_cuda(nn.Linear(4, 8), move_to_cuda),
        )

    def get_input(self, device):
        torch.manual_seed(1 + self.rank)  # keep everything deterministic
        return (torch.rand(4, 8, device=device),)

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        return output.sum()

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ) -> nn.Module:
        """
        Initializes a :class:`NestedWrappedModule` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP but not the top-level module. The model may
                later be wrapped with a top-level FSDP external to this method
                if desired.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return NestedWrappedModule(
                group,
                wrap_fsdp=False,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Does not wrap with top-level FSDP
            fsdp_model = NestedWrappedModule(
                group,
                wrap_fsdp=True,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")


class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ):
        """
        Like :meth:`NestedWrappedModule.init`, except that for the
        ``RECURSIVE`` init mode this wraps with top-level FSDP using the
        ``always_wrap_policy()`` auto wrap policy.
        """
        model = super(
            AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule
        ).init(
            group=group,
            fsdp_init_mode=FSDPInitMode.NO_FSDP,
            cuda_init_mode=cuda_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
        )
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return model
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            fsdp_kwargs = fsdp_kwargs or {}
            fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
+ """ + model = super( + AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule + ).init( + group=group, + fsdp_init_mode=FSDPInitMode.NO_FSDP, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + ) + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + fsdp_kwargs = fsdp_kwargs or {} + fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + + +class NonUniformReqGradNWM(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super(NestedWrappedModule, self).__init__() + # This `__init__` only differs from `NestedWrappedModule.__init__` in that + # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential` + # container. This arrangement results in all elements of the last two parameters + # residing on a single rank. Freezing all parameters except those two allows us + # to verify that `ShardedGradScaler` accommodates situations where some ranks + # have no (non-zero sized) parameter shards. 
+ self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap( + nn.Sequential( + _maybe_cuda(nn.Linear(16, 4), move_to_cuda), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ), + ), + ) + + @staticmethod + def _set_nonuniform_req_grad(model, req_grad_mask) -> None: + for n, p in model.named_parameters(): + if not re.match(req_grad_mask, n): + p.requires_grad_(False) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential` + container to enable the desired non-uniform ``requires_grad`` + ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP`` + init modes, freezes all parameters except the last two to validate + ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in + FSDP ``use_orig_params=True`` mode. + """ + # The parameters that should remain unfrozen are in `module.2.1`. The regex + # pattern below matches the relevant parameter names both with and without + # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present. 
class ModuleWithDelay(FSDPTestModel):
    """Wraps a :class:`FSDPTestModel` to optionally spin on the GPU after
    computing the loss and/or just before gradient reduce-scatter."""

    def __init__(
        self,
        module: nn.Module,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
    ):
        super().__init__()
        self.delay_after_loss_ms = delay_after_loss_ms
        self.delay_before_reduction_ms = delay_before_reduction_ms
        self.module = module

    def get_input(self, device):
        return self.module.get_input(device)

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        loss = self.module.get_loss(input, output)
        if self.delay_after_loss_ms > 0:
            # GPU busy-wait simulating extra post-loss work.
            torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
        return loss

    def run_backward(self, loss):
        orig_reduce_scatter = torch.distributed.reduce_scatter_tensor

        def _delayed_reduce_scatter(*args, **kwargs):
            if self.delay_before_reduction_ms > 0:
                torch.cuda._sleep(
                    int(self.delay_before_reduction_ms * get_cycles_per_ms())
                )
            return orig_reduce_scatter(*args, **kwargs)

        # Intercept FSDP's gradient reduction for the duration of backward.
        with mock.patch(
            "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter
        ):
            self.module.run_backward(loss)

    @staticmethod
    def init(
        module_class: Type[FSDPTestModel],
        *model_args: Any,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
        **model_kwargs: Any,
    ):
        """
        Args:
            module_class (Type[FSDPTestModel]): Wrapped module class to which
                to add delays.
            model_args: Positional arguments forwarded to the ``module_class``
                ``init()``.
            delay_after_loss_ms (int): Delay after computing the loss/before
                the optimizer step (in ms).
            delay_before_reduction_ms (int): Delay before reduce-scattering
                gradients (in ms).
            model_kwargs: Keyword arguments forwarded to the ``module_class``
                ``init()``.
        """
        return ModuleWithDelay(
            module_class.init(*model_args, **model_kwargs),
            delay_after_loss_ms,
            delay_before_reduction_ms,
        )


class NestedWrappedModuleWithDelay(ModuleWithDelay):
    @staticmethod
    def init(  # type: ignore[override]
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_after_loss_ms: int = 0,
        delay_before_reduction_ms: int = 0,
    ):
        """Convenience: a :class:`NestedWrappedModule` wrapped in delays."""
        return ModuleWithDelay.init(
            NestedWrappedModule,
            group=group,
            fsdp_init_mode=fsdp_init_mode,
            cuda_init_mode=cuda_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
            delay_after_loss_ms=delay_after_loss_ms,
            delay_before_reduction_ms=delay_before_reduction_ms,
        )


class DummyDDP(nn.Module):
    """Transparent wrapper mimicking DDP's module-forwarding interface."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)
class MixtureOfExperts(NestedWrappedModule):
    """Nested model with per-rank "expert" parameters (sharded by a
    world-size-1 process group so they are effectively unsharded) plus shared
    parameters, with an optional delay before the expert's params are freed."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        cuda_init_mode: CUDAInitMode,
        delay_before_free_ms: int,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__(
            group=group,
            wrap_fsdp=wrap_fsdp,
            cuda_init_mode=cuda_init_mode,
            deterministic=deterministic,
        )
        self.group = group
        self.delay_before_free_ms = delay_before_free_ms
        self.wrap_fsdp = wrap_fsdp
        self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
        if deterministic:
            # Give each rank different expert parameters
            torch.manual_seed(42 + self.rank)
        d_expert = 23
        d_shared = 12
        d_input = 8
        expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)

        self.num_expert_params = sum(p.numel() for p in expert.parameters())
        for p in expert.parameters():
            p.expert = True  # type: ignore[attr-defined]

        if deterministic:
            # Keep all other parameters the same across ranks
            torch.manual_seed(0)

        shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda)

        if wrap_fsdp:
            # we create a process group of size 1 for the expert params
            expert_group = torch.distributed.new_group(
                [group.rank()]
            )  # world size 1 means no shard
            expert = FSDP(expert, expert_group, **fsdp_kwargs)  # type: ignore[assignment]
            shared = FSDP(shared, group, **fsdp_kwargs)  # type: ignore[assignment]

        self.module = nn.Sequential(
            _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda),
            shared,
            expert,
            _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda),
        )

    def forward(self, x):
        if self.delay_before_free_ms > 0:
            expert = self.module[2]
            if isinstance(expert, FSDP):
                orig_reshard = torch.distributed.fsdp._runtime_utils._reshard

                def _delayed_reshard(*args, **kwargs):
                    # GPU busy-wait before freeing the expert's full params.
                    torch.cuda._sleep(
                        int(self.delay_before_free_ms * get_cycles_per_ms())
                    )
                    return orig_reshard(*args, **kwargs)

                # This patch covers any `import torch..._reshard` uses.
                with mock.patch(
                    "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
                ):
                    return self.module(x)

        return self.module(x)

    def run_backward(self, loss):
        loss.backward()
        # Manually reduce gradients if not wrapped in FullyShardedDataParallel
        if not self.wrap_fsdp:
            with torch.no_grad():
                for p in self.parameters():
                    if hasattr(p, "expert"):
                        continue  # these params don't need grad reduction
                    if p.grad is not None:
                        p.grad.div_(self.world_size)
                        torch.distributed.all_reduce(p.grad, group=self.group)

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_before_free_ms: int = 0,
    ):
        """
        Initializes a :class:`MixtureOfExperts` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP, including the expert and shared layers, but
                not the top-level module. The model may later be wrapped with a
                top-level FSDP external to this method if desired.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            delay_before_free_ms (int): Delay before resharding expert
                parameters in the forward pass (in ms).
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return MixtureOfExperts(
                group,
                wrap_fsdp=False,
                cuda_init_mode=cuda_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Does not wrap with top-level FSDP
            fsdp_model = MixtureOfExperts(
                group,
                wrap_fsdp=True,
                cuda_init_mode=cuda_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
+ """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return MixtureOfExperts( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = MixtureOfExperts( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class MLP(nn.Module): + def __init__( + self, + dim: int, + device: Optional[torch.device] = None, + *, + bias: bool = True, + with_buffer: bool = False, + dim_multiplier: int = 4, + ): + super().__init__() + self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device, bias=bias) + self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device, bias=bias) + if with_buffer: + self.register_buffer("buffer", torch.randn((dim,), device=device)) + else: + self.buffer = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + z = self.in_proj(x) + z = F.relu(z) + z = self.out_proj(z) + z = F.relu(z) + if self.buffer is not None: + z = z + self.buffer + return z + + def reset_parameters(self): + if self.buffer is not None: + torch.nn.init.normal_(self.buffer) + + +class MLPStack(nn.Sequential): + def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False): + modules: List[nn.Module] = [ + # Use multiplier of 3 to exercise uneven case + MLP(mlp_dim, dim_multiplier=3), + MLP(mlp_dim), + MLP(mlp_dim, dim_multiplier=3), + ] + if with_seq_parallel: + modules.append(nn.LayerNorm(mlp_dim, bias=False)) + super().__init__(*modules) + self.with_seq_parallel = with_seq_parallel + + def parallelize( + self, + tp_mesh: DeviceMesh, + dp_mesh: DeviceMesh, + 
use_activation_checkpointing: bool, + **fsdp_kwargs, + ) -> "MLPStack": + parallelize_plan = { + # Pass `use_local_output=False` to keep as DTensor to preserve + # uneven activation dims + "0.in_proj": ColwiseParallel(use_local_output=False), + "0.out_proj": RowwiseParallel(use_local_output=False), + "1.in_proj": ColwiseParallel(use_local_output=False), + "1.out_proj": RowwiseParallel(use_local_output=False), + "2.in_proj": ColwiseParallel(use_local_output=False), + "2.out_proj": RowwiseParallel(output_layouts=Shard(1)) + if self.with_seq_parallel + else RowwiseParallel(), + } + if self.with_seq_parallel: + parallelize_plan["3"] = SequenceParallel(sequence_dim=1) + parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=parallelize_plan) + for module in self: + if isinstance(module, nn.LayerNorm): + continue + if use_activation_checkpointing: + checkpoint(module) + fully_shard(module, mesh=dp_mesh, **fsdp_kwargs) + fully_shard(self, mesh=dp_mesh, **fsdp_kwargs) + return self + + +class DoubleLinear(nn.Module): + """ + This can be used for returning multiple outputs from a module + (``use_second_linear=True``) or for having an unused module (``False``). + """ + + def __init__(self, dim: int, use_second_linear: bool = True): + super().__init__() + self.lin1 = nn.Linear(dim, dim) + self.lin2 = nn.Linear(dim, dim) + self.relu = nn.ReLU() + self.use_second_linear = use_second_linear + + def forward( + self, x: torch.Tensor + ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]: + if self.use_second_linear: + return self.relu(self.lin1(x)), self.relu(self.lin2(x)) + return self.relu(self.lin1(x)) + + +# NOTE: For these patch methods, if we want safety under multi-threading (e.g. 
+# when using multi-threaded process group), then we want: +# (1) a barrier immediately after reading the original value to ensure that all +# threads see the same original value +# (2) a barrier immediately before restoring the original value to ensure that +# all threads use the patched value inside the context +@contextlib.contextmanager +def patch_all_gather(new_all_gather_into_tensor: Callable): + orig_all_gather = dist.all_gather_into_tensor + dist.barrier() + dist.all_gather_into_tensor = new_all_gather_into_tensor + try: + yield + finally: + dist.barrier() + dist.all_gather_into_tensor = orig_all_gather + + +@contextlib.contextmanager +def patch_reduce_scatter(new_reduce_scatter_tensor: Callable): + orig_reduce_scatter = dist.reduce_scatter_tensor + dist.barrier() + dist.reduce_scatter_tensor = new_reduce_scatter_tensor + try: + yield + finally: + dist.barrier() + dist.reduce_scatter_tensor = orig_reduce_scatter + + +@contextlib.contextmanager +def patch_all_reduce(new_all_reduce: Callable): + orig_all_reduce = dist.all_reduce + dist.barrier() + dist.all_reduce = new_all_reduce + try: + yield + finally: + dist.barrier() + dist.all_reduce = orig_all_reduce + + +@no_type_check +@contextlib.contextmanager +def patch_unshard(new_unshard: Callable): + orig_unshard = FSDPParamGroup.unshard + dist.barrier() + FSDPParamGroup.unshard = new_unshard + try: + yield + finally: + dist.barrier() + FSDPParamGroup.unshard = orig_unshard + + +@no_type_check +@contextlib.contextmanager +def patch_post_backward(new_post_backward: Callable): + orig_post_backward = FSDPParamGroup.post_backward + dist.barrier() + FSDPParamGroup.post_backward = new_post_backward + try: + yield + finally: + dist.barrier() + FSDPParamGroup.post_backward = orig_post_backward + + +@no_type_check +@contextlib.contextmanager +def patch_register_post_backward_hook_backward(new_backward: Callable): + orig_backward = RegisterPostBackwardFunction.backward + dist.barrier() + 
RegisterPostBackwardFunction.backward = new_backward + try: + yield + finally: + dist.barrier() + RegisterPostBackwardFunction.backward = orig_backward + + +def reduce_scatter_with_assert( + cls, + orig_reduce_scatter: Callable, + assert_fn: Callable, # `assert_fn(output: Tensor)` + *args: Any, + **kwargs: Any, +): + if len(args) > 0: + output = args[0] + elif "output" in kwargs: + output = kwargs["output"] + else: + raise AssertionError( + f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}" + ) + assert_fn(output) + return orig_reduce_scatter(*args, **kwargs) + + +def check_sharded_parity( + cls, # unit test class + replicated_module: nn.Module, + sharded_module: nn.Module, + prefixes_to_ignore: Tuple[str, ...] = (), +): + for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip( + replicated_module.named_parameters(), sharded_module.named_parameters() + ): + clean_sharded_name = sharded_name + for prefix in prefixes_to_ignore: + clean_sharded_name = clean_sharded_name.replace(prefix, "") + cls.assertEqual(replicated_name, clean_sharded_name) + cls.assertIsInstance(sharded_param, DTensor) + assert isinstance(sharded_param, DTensor) # mypy + mesh, placements = sharded_param.device_mesh, sharded_param.placements + if tuple(placements) == (Shard(0), Shard(0)): + raise AssertionError( + "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), " + "so we cannot check for equality using it" + ) + sharded_ref_param = distribute_tensor(replicated_param, mesh, placements) + cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local()) + if replicated_param.grad is None: + cls.assertIsNone(sharded_param.grad) + continue + cls.assertIsNotNone(sharded_param.grad) + sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements) + cls.assertIsInstance(sharded_param.grad, DTensor) + assert isinstance(sharded_param.grad, DTensor) # mypy + cls.assertEqual(sharded_param.grad.to_local(), 
sharded_ref_grad.to_local()) + + +class FSDPTestMultiThread(MultiThreadedTestCase): + @property + def world_size(self): + return torch.cuda.device_count() if torch.cuda.is_available() else 4 + + def setUp(self): + super().setUp() + self._spawn_threads() + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + def perThreadSetUp(self): + torch._dynamo.reset() + + def perThreadTearDown(self): + torch._dynamo.reset() + + +class FSDPTest(MultiProcessTestCase): + def setUp(self): + super().setUp() + # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`, + # which can cause unit test flakiness: + # https://github.com/pytorch/pytorch/issues/90848 + os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0" + self._spawn_processes() + + @property + def world_size(self): + return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4 + + @property + def process_group(self): + return dist.distributed_c10d._get_default_group() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + def _check_cpu_offload(self, fsdp_model, cpu_offload): + self.assertEqual(cpu_offload, fsdp_model.cpu_offload) + + def _check_backward_prefetch(self, fsdp_model, backward_prefetch): + self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch) + + def _check_forward_prefetch(self, fsdp_model, forward_prefetch): + self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + @classmethod + def _run(cls, rank, test_name, file_name, pipe): + self = cls(test_name) + self.rank = rank + self.file_name = file_name + + print(f"dist init r={self.rank}, world={self.world_size}") + + # Specify gloo backend to make 'init_process_group()' succeed, + # Actual tests will be skipped if there is no enough GPUs. 
+ backend = "nccl" if torch.cuda.is_available() else "gloo" + + try: + dist.init_process_group( + init_method=self.init_method, + backend=backend, + world_size=int(self.world_size), + rank=self.rank, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + device_ids = None + if torch.cuda.is_available() and torch.cuda.device_count(): + device_id = self.rank % torch.cuda.device_count() + torch.cuda.set_device(device_id) + device_ids = [device_id] + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. + dist.barrier(device_ids=device_ids) + + torch._dynamo.reset() + self.run_test(test_name, pipe) + torch._dynamo.reset() + + dist.barrier(device_ids=device_ids) + + dist.destroy_process_group() + + def _train_for_several_steps( + self, + model: nn.Module, + num_steps: int, + autocast: bool, + lr: float = 0.01, + fsdp_cpu_offload: Optional[CPUOffload] = None, + save_model: bool = False, + mixed_precision: Optional[MixedPrecision] = None, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + ): + cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params + + model_device = next(model.parameters()).device + if sharded_grad_scaler_kwargs is None: + sharded_grad_scaler_kwargs = {} + sharded_grad_scaler = ShardedGradScaler( + enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs + ) + # use SGD with momentum instead of Adam, since Adam is scale invariant + # and this makes it bad for tests + optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) + for _ in range(num_steps): + optim.zero_grad() + with torch.cuda.amp.autocast(enabled=autocast): + # Inputs always cuda regardless of cpu offloading, or model.device + input = 
model.module.get_input(torch.device("cuda")) + if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)): + if isinstance(input, torch.Tensor): + input = input.half() + else: + input = tuple(x.half() for x in input) + output = model(*input) + # Post-forward, if CPU offloading model param should be on CPU. + if ( + cpu_offload_params + and isinstance(model, FSDP) + # If not resharding after forward, the parameters are still + # exposed as unsharded views into the GPU flat parameter + and model.sharding_strategy + not in NO_RESHARD_AFTER_FORWARD_STRATEGIES + ): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + + loss = model.module.get_loss(input, output).to(model_device) + loss = sharded_grad_scaler.scale(loss) + + if not mixed_precision and not use_pure_fp16: + assert ( + loss.dtype == torch.float32 + ), "loss data type should be float32, as the original \ + parameter data type is float32." + else: + if use_pure_fp16: + self.assertEqual(loss.dtype, torch.float16) + # FSDP loss is fp16, DDP AMP loss is fp32 + elif isinstance(model, FSDP): + assert mixed_precision is not None # mypy + self.assertEqual(loss.dtype, mixed_precision.param_dtype) + else: + self.assertEqual(loss.dtype, torch.float32) + model.module.run_backward(loss) + # Post-backward, if CPU offloading model params should be on CPU. + if cpu_offload_params and isinstance(model, FSDP): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + # Unscale the gradients and step + sharded_grad_scaler.step(optim) + # Update the scale factor + sharded_grad_scaler.update() + # if save_model, simulate save + load. + if save_model: + state_dict = {k: v.clone() for k, v in model.state_dict().items()} + # Zero params, if save/load state_dict did not work properly, this + # would break the parity test with DDP. 
+ _zero_model(model) + model.load_state_dict(state_dict) + + if isinstance(model, FSDP): + model._assert_state(TrainingState.IDLE) + return loss.detach() # type: ignore[possibly-undefined] + + def _test_fsdp_parity( + self, + model_class: Type[FSDPTestModel], + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + ref_init_fn: Optional[Callable] = None, + num_iters: int = 2, + save_model: bool = True, + cpu_offload: CPUOffload = CPUOffload(), + backward_prefetch: Optional[BackwardPrefetch] = None, + sharding_strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + forward_prefetch: bool = False, + use_orig_params: bool = False, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + init_kwargs: Optional[Dict[str, Any]] = None, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + **fsdp_kwargs, + ): + """ + Tests FSDP training against a reference, which defaults to DDP but + may be customized with ``ref_init_fn``. + + Args: + model_class (Type[FSDPTestModel]): A model class that inherits from + ``FSDPTestModel``, which defines the expected interface. + fsdp_init_mode (FSDPInitMode): The mode to initialize the + FSDP-wrapped model. This should not be ``NO_FSDP``. + ref_init_fn (Optional[Callable]): A callable to invoke that wraps a + non-wrapped model to construct the reference model, where this + wrapper should provide data parallel semantics. If ``None``, + then the callable defaults to the DDP constructor. 
+ """ + assert ( + fsdp_init_mode != FSDPInitMode.NO_FSDP + ), "Expects an FSDP init mode that wraps with FSDP" + if init_kwargs is None: + init_kwargs = {} + lr = 1e-2 + rank = self.process_group.rank() + # Establish reference behavior with DDP + model = model_class.init( + self.process_group, + FSDPInitMode.NO_FSDP, + CUDAInitMode.CUDA_BEFORE, + deterministic=True, + **init_kwargs, + ) + if ref_init_fn is None: + ref_model = DDP(model, device_ids=[rank], output_device=rank) + else: + ref_model = ref_init_fn(model) + if use_pure_fp16: + ref_model = ref_model.half() + ref_loss = self._train_for_several_steps( + ref_model, + num_iters, + autocast=mixed_precision is not None, + lr=lr, + fsdp_cpu_offload=cpu_offload, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + ddp_params = list(ref_model.parameters()) + # Check against FSDP behavior + fsdp_kwargs.update( + { + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "sharding_strategy": sharding_strategy, + "mixed_precision": mixed_precision, + "forward_prefetch": forward_prefetch, + "use_orig_params": use_orig_params, + } + ) + try: + fsdp_model = model_class.init( + self.process_group, + fsdp_init_mode, + cuda_init_mode, + fsdp_kwargs, + deterministic=True, + **init_kwargs, + ) + except Exception as e: + raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e + if not isinstance(fsdp_model, FSDP): + # Enforce that we wrap with top-level FSDP since we are comparing + # assuming a data parallel reference and some test models may not + # do so in their `init()` method + fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs) + if use_pure_fp16: + # Change the model parameter dtype after FSDP initialization + fsdp_model = fsdp_model.half() + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + offload_params 
= cpu_offload is not None and cpu_offload.offload_params + # Offloading parameters with `CUDA_AFTER` should raise an error during + # lazy initialization due to the parameter devices not being CPU; + # otherwise, all parameter devices should be CPU + expects_device_error = ( + offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER + ) + expects_cpu_device = ( + offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER + ) + if expects_cpu_device: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + context = ( + self.assertRaisesRegex( + RuntimeError, + "An FSDP-managed module with parameter CPU offloading enabled " + "has parameters on cuda", + ) + if expects_device_error + else nullcontext() + ) + with context: + fsdp_loss = self._train_for_several_steps( + fsdp_model, + num_iters, + autocast=False, + lr=lr, + fsdp_cpu_offload=cpu_offload, + save_model=save_model, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + # No need to check for parameter and loss parity if expecting an error + if expects_device_error: + return + # Check parameter devices are CPU if offloading to CPU before calling + # `get_full_params()`, which will cast the parameters to FP32 + if offload_params: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + fsdp_loss = fsdp_loss.cuda() + fsdp_unsharded_params = get_full_params(fsdp_model) + # Do not check dtype since the reference DDP loss may not be the same + # dtype as the FSDP loss in the case of mixed precision + torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False) + # Do not check for parameter parity if using mixed precision since (1) + # the DDP parameters are in FP16 (from `half()`) while the FSDP + # parameters are in FP32 (from 
`summon_full_params()`) and (2) DDP runs + # the optimizer in FP16 while FSDP runs it in FP32 + # TODO: Disable checking the parameters for pure FP16 due to floating + # point inaccuracy. Note that this means that the backward pass is not + # checked: https://github.com/pytorch/pytorch/issues/90784 + if mixed_precision is None and not use_pure_fp16: + self.assertEqual( + ddp_params, + fsdp_unsharded_params, + exact_device=True, + msg="FSDP did not match DDP", + ) + + +def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None): + def fully_shard_with_compiled_compute(*args, **kwargs): + torch.distributed._composable.fsdp.fully_shard(*args, **kwargs) # type: ignore[operator] + if compile_compute_on_module is None or isinstance( + args[0], compile_compute_on_module + ): + args[0].compile() + + class FullyShardMode(Enum): + EAGER = auto() + COMPILED_COMPUTE = auto() + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + original_fully_shard = torch.distributed._composable.fsdp.fully_shard + for mode in FullyShardMode: + if mode != FullyShardMode.EAGER and not has_triton(): + warnings.warn("Inductor on GPU needs Triton and recent GPU arch") + continue + # barrier to ensure thread reading the same value + original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks + original_compile_threads = torch._inductor.config.compile_threads + torch.distributed.barrier() + + if mode == FullyShardMode.EAGER: + fully_shard_patch = original_fully_shard + elif mode == FullyShardMode.COMPILED_COMPUTE: + torch._dynamo.config.skip_fsdp_hooks = True + torch._inductor.config.compile_threads = 1 + fully_shard_patch = fully_shard_with_compiled_compute # type: ignore[assignment] + else: + raise NotImplementedError( + f"Need to implement FullyShardMode={mode}" + ) + + # fully_shard is imported as a global + # through `from ... 
import fully_shard` + func.__globals__[original_fully_shard.__name__] = fully_shard_patch + func(*args, **kwargs) + # other threads use patched func before this thread restores + torch.distributed.barrier() + func.__globals__[original_fully_shard.__name__] = original_fully_shard + torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks + torch._inductor.config.compile_threads = original_compile_threads + + return wrapper + + return decorator + + +class SkipModule(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.lin(x) + + +class NestedLinear(nn.Module): + def __init__(self, fsdp_wrap): + super().__init__() + if fsdp_wrap: + self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda()) + else: + self.nested_linear = nn.Linear(10, 10, bias=False).cuda() + + def forward(self, x): + return self.nested_linear(x) + + +class SkipModel(nn.Module): + def __init__(self, double_nest): + super().__init__() + self.linear = nn.Linear(10, 10, bias=False).cuda() + self.linear_skip = SkipModule().cuda() + self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest)) + + def forward(self, x): + x = self.linear(x) + x = self.linear_skip(x) + x = self.nested_linear(x) + return x diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..8a676c7e16c780b52f156cece3a7b7347c6f465b --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py @@ -0,0 +1,323 @@ +# mypy: ignore-errors + +# Torch +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized + +# Testing utils +from torch.testing._internal.common_dtype import floating_and_complex_types_and +from torch.testing._internal.common_utils import TestCase, \ + 
freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from itertools import chain +from typing import List, Union +from torch._C import TensorType + +import io + +def check_output_types(self, func, ref_outputs, args, kwargs): + graph = getattr(func, 'last_graph', None) + types = [o.type() for o in graph.outputs()] + self.assertTrue(len(types) == 1) + t = types[0] + torch._C._jit_assert_is_instance(ref_outputs, t) + +# Test names in this set are only checked for a single derivative +nn_functional_single_grad = frozenset('test_nn_' + name for name in [ + 'pdist', + 'multilabel_margin_loss', + 'max_unpool3d', + 'multi_margin_loss', + 'binary_cross_entropy', + 'binary_cross_entropy_size_average', + 'ctc_loss', + 'grid_sample', +]) + +def check_against_reference(self, func, reference_func, output_func, args, kwargs=None, + allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False): + """Verifies a function performs identically to some reference implementation. + + Commonly, this is used to verify that a JIT implementation + (output_func) matches the behavior of the eager implementation + (reference_func). 
+ """ + kwargs = kwargs if kwargs else {} + + def allSum(vs): + if isinstance(vs, torch.Tensor): + vs = (vs,) + return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum() + for i, v in enumerate(vs) + if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16)) + + def clone_tensor(t, preserve_requires_grad): + require_grad = preserve_requires_grad and t.requires_grad + return t.detach().clone().requires_grad_(require_grad) + + def clone_inputs(preserve_requires_grad: bool): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(clone_tensor(arg, preserve_requires_grad)) + elif is_iterable_of_tensors(arg): + inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg]) + else: + inputs.append(arg) + + return inputs + + # Returns tensors in args that requires_grad, including tensors in TensorList args + def get_recording_tensors(args): + recording_tensors: List[torch.Tensor] = [] + + for arg in args: + if isinstance(arg, torch.Tensor) and arg.requires_grad: + recording_tensors.append(arg) + elif is_iterable_of_tensors(arg): + recording_tensors.extend(filter(lambda t: t.requires_grad, arg)) + + return recording_tensors + + # test no gradients case + nograd_inputs = clone_inputs(preserve_requires_grad=False) + outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs) + with enable_profiling_mode_for_profiling_tests(): + outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs) + self.assertEqual(outputs, outputs_test) + + if check_types: + check_output_types(self, func, outputs_test, nograd_inputs, kwargs) + + if no_grad: + # skip grad tests + return + + with enable_profiling_mode_for_profiling_tests(): + # test single grad case + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs = output_func(self.runAndSaveRNG(reference_func, 
recording_inputs, kwargs)) + grads = torch.autograd.grad(allSum(outputs), recording_tensors, + allow_unused=allow_unused) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + # test the grad grad case + if self._testMethodName in nn_functional_single_grad or no_gradgrad: + return + + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + l1 = allSum(outputs) + grads = torch.autograd.grad(l1, recording_tensors, create_graph=True, + allow_unused=allow_unused) + + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused) + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + l1_test = allSum(outputs_test) + grads_test = torch.autograd.grad( + l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused) + + l2_test = (allSum(grads_test) * l1_test) + grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + for g2, g2_test in zip(grads2, grads2_test): + if g2 is None and g2_test is None: + continue + self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4) + +class JitCommonTestCase(TestCase): + def createFunctionFromGraph(self, trace): + graph = trace if isinstance(trace, torch._C.Graph) else trace.graph() + return torch._C._create_function_from_graph("forward", graph) + + def assertExportImport(self, trace, inputs): + m = self.createFunctionFromGraph(trace) + self.assertExportImportModule(m, inputs) + + def assertExportImportModule(self, m, inputs): + m_import = self.getExportImportCopy(m) + a = 
self.runAndSaveRNG(m, inputs) + b = self.runAndSaveRNG(m_import, inputs) + self.assertEqual(a, b, "Results of original model and " + "exported/imported version of model differed") + + def runAndSaveRNG(self, func, inputs, kwargs=None): + kwargs = kwargs if kwargs else {} + with freeze_rng_state(): + results = func(*inputs, **kwargs) + return results + + def getExportImportCopy(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + torch.jit.save(m, buffer) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + + if not also_test_file: + return imported + + with TemporaryFileName() as fname: + torch.jit.save(imported, fname) + return torch.jit.load(fname, map_location=map_location) + + def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph, + fusion_nodes_not_found, non_fusible_nodes_being_fused, + fusion_nodes_found, nodes_in_diff_graph): + err_msg = "\nFailure in testing nodes' autodifferentiation. " + if should_autodiff_node: + err_msg += "One or more nodes were expected to be autodiffed, " \ + "but were not found in specified fusible/nonfusible " \ + "DifferentiableGraph groups. 
\nSpecifically:" + # The node is intended to appear in a differentiable graph but doesn't + diff_nodes_missing = [] + # The node is intended to appear in a differentiable graph + # outside of a fusion group but instead is in a fusion group + diff_nodes_in_fusion = [] + # The node is intended to appear in a fusion group but doesn't + fusion_nodes_missing = [] + # The node is intended to appear in a fusion group but instead + # is just in an outer differentiable graph + fusion_nodes_in_diff = [] + for node in nodes_not_in_diff_graph: + if node in non_fusible_nodes_being_fused: + diff_nodes_in_fusion.append(node) + else: + diff_nodes_missing.append(node) + for node in fusion_nodes_not_found: + if node in nodes_in_diff_graph: + fusion_nodes_in_diff.append(node) + else: + fusion_nodes_missing.append(node) + if len(diff_nodes_missing) > 0: + err_msg += f"\n {diff_nodes_missing} were not in one of the " \ + "DifferentiableGraphs when they were expected to be. " \ + "Did you intend for these nodes to be autodiffed? " \ + "If not, remove them from the list of nonfusible nodes." + if len(diff_nodes_in_fusion) > 0: + err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \ + "when they were expected to be just in a DifferentiableGraph. If it was " \ + "intended for these nodes to be in FusionGroups, reclassify these nodes as " \ + "fusible nodes. If these nodes were not intended to be fused, your " \ + "autodifferentiation logic might be wrong." + if len(fusion_nodes_missing) > 0: + err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be. " \ + "They were also not found in an outer DifferentiableGraph. Did you " \ + "intend for these nodes to be autodifferentiated? If not, you should " \ + "remove these nodes from the test's fusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." 
+ if len(fusion_nodes_in_diff) > 0: + err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be, " \ + "instead they were found just in an outer DifferentiableGraph. " \ + "Did you intend for these nodes to be fused? If not, you should " \ + "move these nodes into the test's nonfusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." + else: + err_msg += "One or more nodes were not expected to be autodiffed " \ + "but were found in a DifferentiableGraph or in a FusionGroup " \ + "of a DifferentiableGraph. Did you intend for these nodes to be " \ + "autodiffed? If so, change this test to expect autodifferentiation. " \ + "\nSpecifically:" + if len(fusion_nodes_found) > 0: + err_msg += f"\n {fusion_nodes_found} were not expected to be in " \ + "one of the DifferentiableGraphs, but appeared in a FusionGroup " \ + "of a DifferentiableGraph. " + if len(nodes_in_diff_graph) > 0: + err_msg += f"\n {nodes_in_diff_graph} were not expected to " \ + "be in one of the DifferentiableGraphs but were." + return err_msg + + def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes): + diff_nodes = graph.findAllNodes('prim::DifferentiableGraph') + diff_subgraphs = [node.g('Subgraph') for node in diff_nodes] + + # Note: currently no tests have fusible_nodes + fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs])) + fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes] + + # For any non-fusible node, it must show up in one of the DifferentiableGraphs. 
+ nodes_in_diff_graph = [] + nodes_not_in_diff_graph = [] + non_fusible_nodes_being_fused = [] + for node in nonfusible_nodes: + if any(g.findNode(node) is not None for g in diff_subgraphs): + nodes_in_diff_graph.append(node) + else: + nodes_not_in_diff_graph.append(node) + if any(g.findNode(node) is not None for g in fusion_subgraphs): + non_fusible_nodes_being_fused.append(node) + found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes) + + # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs. + fusion_nodes_found = [] + fusion_nodes_not_found = [] + for node in fusible_nodes: + if any(g.findNode(node) is not None for g in fusion_subgraphs): + fusion_nodes_found.append(node) + else: + fusion_nodes_not_found.append(node) + found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes) + + if should_autodiff_node is not None: + err_msg = self.autoDiffErrorMessage(should_autodiff_node, + nodes_not_in_diff_graph, + fusion_nodes_not_found, + non_fusible_nodes_being_fused, + fusion_nodes_found, + nodes_in_diff_graph) + self.assertEqual(should_autodiff_node, + found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg) + + def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]], + traced_graph, assert_propagation, constant_prop=True): + # repropagte input shapes provided by tracing, + prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled() + for enable_test_mode in [True, False]: + # here we are testing allowing/disallowing substituting in complete shapes as constants, + # disallowing constants helps stress test partial eval and substitution pipeline + torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode) + torch._C._jit_erase_non_input_shape_information(traced_graph) + if constant_prop: + torch._C._jit_pass_constant_propagation(traced_graph) + torch._C._jit_pass_propagate_shapes_on_graph(traced_graph) + # Add sizes to default 
tensor type to avoid checking something out of scope + # and difficulties with tracer leaving in other parts of tensor type + output = next(traced_graph.outputs()).type() + + def test_type(type, actual_size): + sizes = type.symbolic_sizes() + out_type = TensorType.get().with_sizes(sizes) + actual_type = TensorType.get().with_sizes(actual_size) + + # always check actual shape is a subtype of the output + self.assertTrue(actual_type.isSubtypeOf(out_type)) + + # and then if assertion flag is provided, check shape analysis + # is successful + if assert_propagation: + self.assertEqual(out_type.sizes(), actual_size) + + if output.isSubtypeOf(torch._C.TensorType.get()): + test_type(output, out_sizes) + else: + tuple_elements = output.elements() + for i in range(len(tuple_elements)): + test_type(tuple_elements[i], out_sizes[i]) + + torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb11ce15e5f22aee4c08afa214ce5737a92fae7 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py @@ -0,0 +1,24144 @@ +# mypy: ignore-errors + +from functools import wraps, partial +from itertools import product, chain, islice +import itertools +import functools +import copy +import operator +import random +import unittest +import math +import enum + +import torch +import numpy as np +from torch import inf, nan + +from typing import Any, Dict, List, Tuple, Union, Sequence +from torch.testing import make_tensor +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, + floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, 
integral_types_and, + all_types, empty_types, complex_types_and, integral_types, custom_types, +) +from torch.testing._internal.common_device_type import \ + (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIf, precisionOverride, + skipCPUIfNoMklSparse, + toleranceOverride, tol) +from torch.testing._internal.common_cuda import ( + PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_FUSED_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, + SM53OrLater, SM80OrLater, SM90OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, + _get_torch_rocm_version, +) +from torch.testing._internal.common_utils import ( + make_fullrank_matrices_with_distinct_singular_values, + TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, + torch_to_numpy_dtype_dict, numpy_to_torch_dtype, TEST_WITH_ASAN, + GRADCHECK_NONDET_TOL, slowTest, TEST_WITH_SLOW, + TEST_WITH_TORCHINDUCTOR +) +from torch.testing._utils import wrapper_set_seed + +import torch._refs as refs # noqa: F401 +import torch._refs.nn.functional +import torch._refs.special +import torch._refs.linalg +import torch._prims as prims # noqa: F401 +from torch.utils import _pytree as pytree + + +from packaging import version + +from torch.testing._internal.opinfo.core import ( # noqa: F401 + L, + M, + S, + XS, + _NOTHING, + _getattr_qual, + DecorateInfo, + SampleInput, + ErrorInput, + AliasInfo, + NumericsFilter, + OpInfo, + _generate_reduction_inputs, + _generate_reduction_kwargs, + sample_inputs_reduction, + ReductionOpInfo, + reference_inputs_elementwise_binary, + make_error_inputs_elementwise_binary, + generate_elementwise_binary_tensors, + generate_elementwise_binary_arbitrarily_strided_tensors, + generate_elementwise_binary_small_value_tensors, + generate_elementwise_binary_large_value_tensors, + generate_elementwise_binary_extremal_value_tensors, + generate_elementwise_binary_broadcasting_tensors, + 
generate_elementwise_binary_with_scalar_samples, + generate_elementwise_binary_with_scalar_and_type_promotion_samples, + generate_elementwise_binary_noncontiguous_tensors, + sample_inputs_elementwise_binary, + BinaryUfuncInfo, + sample_inputs_elementwise_unary, + generate_elementwise_unary_tensors, + generate_elementwise_unary_small_value_tensors, + generate_elementwise_unary_large_value_tensors, + generate_elementwise_unary_extremal_value_tensors, + reference_inputs_elementwise_unary, + UnaryUfuncInfo, + sample_inputs_spectral_ops, + SpectralFuncType, + SpectralFuncInfo, + ShapeFuncInfo, + sample_inputs_foreach, + ForeachFuncInfo, + gradcheck_wrapper_hermitian_input, + gradcheck_wrapper_triangular_input, + gradcheck_wrapper_triangular_input_real_positive_diagonal, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + clone_sample, +) +from torch.testing._internal.opinfo.refs import ( # NOQA: F401 + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, + ReductionPythonRefInfo, + ElementwiseUnaryPythonRefInfo, + ElementwiseBinaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, + reference_reduction_numpy, + prod_numpy +) +from torch.testing._internal import opinfo +from torch.testing._internal.opinfo.definitions.linalg import ( + sample_inputs_linalg_cholesky, + sample_inputs_linalg_cholesky_inverse, + sample_inputs_cross, + sample_inputs_linalg_qr_geqrf, + sample_inputs_linalg_invertible, + sample_inputs_lu_solve, + sample_inputs_legacy_solve, + sample_inputs_svd, + sample_inputs_linalg_det_logdet_slogdet, + sample_inputs_linalg_lu, + sample_inputs_diagonal_diag_embed, + error_inputs_diagonal_diag_embed, +) +from torch.testing._internal.opinfo.definitions.special import ( + sample_inputs_i0_i1, + sample_inputs_polygamma, + reference_polygamma, +) +from torch.testing._internal.opinfo.definitions._masked import ( + sample_inputs_softmax_variant, +) 
# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
    """Return a boolean tensor marking entries of ``x`` within ``eps`` of an integer.

    For complex inputs the fractional parts of the real and imaginary
    components are recombined into a complex number and its magnitude is
    tested, so "close to an integer" means close in the complex plane.
    """
    if x.is_complex():
        frac = torch.view_as_complex(torch.frac(torch.view_as_real(x)))
    else:
        frac = torch.frac(x)
    magnitude = torch.abs(frac)
    # Near 0 means just above an integer; near 1 means just below the next one.
    return (magnitude < eps) | (magnitude > 1 - eps)
def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.hsplit: an even integer split and an
    index-list split."""
    tensor_kwargs = dict(dtype=dtype, device=device, low=None, high=None,
                         requires_grad=requires_grad)
    # 1-D tensor of 6 elements split into 2 equal chunks.
    yield SampleInput(make_tensor(6, **tensor_kwargs), 2)
    # 3-D tensor split at explicit indices.
    yield SampleInput(make_tensor(S, S, S, **tensor_kwargs), [1, 2, 3])
def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.as_strided.

    Each case is (input shape, output shape, output stride, storage offset);
    the offset is passed as a keyword argument.
    """
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((1,), (1,), (1,), 0),
        ((3, 3), (2, 2), (1, 2), 0),
        ((3, 3), (2, 2), (1, 2), 1),             # nonzero storage offset
        ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),  # strides make elements overlap
        ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),
    )

    for in_shape, out_shape, out_stride, offset in cases:
        yield SampleInput(make_arg(in_shape),
                          args=(out_shape, out_stride),
                          kwargs=dict(storage_offset=offset))
def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.combinations: 1-D tensors of varying length,
    crossed with several ``r`` values and both with_replacement settings."""
    element_sets = (
        (0,),
        (0, 1),
        (0, 1, 2, 3),
    )
    sizes = [1, 2, 4]

    for values in element_sets:
        for r in sizes:
            for with_replacement in (False, True):
                source = torch.tensor(values, device=device, dtype=dtype,
                                      requires_grad=requires_grad)
                yield SampleInput(source, r=r, with_replacement=with_replacement)
def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for F.cosine_similarity.

    Each case pairs an input shape (used for both operands) with the dim/eps
    kwargs to forward; a few extra broadcasting pairs follow.
    """
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input_shape, dict of dim and eps
    cases = (
        ((S, S), {'dim': 1}),
        ((S, 2), {'dim': -1}),
        ((S,), {'dim': 0, 'eps': 0.5}),
        ((), {'dim': 0}),
        ((S, S, M), {'dim': 2}),
        ((S, S), {}),
    )

    for shape, fn_kwargs in cases:
        yield SampleInput(make_arg(shape), args=(make_arg(shape),), kwargs=fn_kwargs)
    # Broadcasting between the two operands.
    yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
    yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})
    yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for training, momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), + ((3, 2, 4), {'training': False, 'momentum': -1.2}), + ((3, 1), {'training': True, 'momentum': 0.0}), + ((0,), {'training': True}), + ((0,), {'training': False}), + ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), + ((2, 1), {}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight = make_arg(channels) if channels > 0 else None + bias = make_arg(channels) if channels > 0 else None + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + weight, + bias + ), + kwargs=kwargs + ) + + # Checking for permutations of weights and biases as `None` + weights = [channels, None, None] + biases = [None, channels, None] + is_training = [True, False, False] + + for weight, bias, training in zip(weights, biases, is_training): + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + make_arg(channels), + make_arg(channels) + ), + kwargs={'training': training} + ) + + # Test case for no optional kwargs + # running_mean and running_var are required in evaluation mode (training: False) but not in training mode + yield SampleInput(make_arg((1, 2, 3)), args=(None, None, None, None), kwargs={'training': True}) + +def sample_inputs_softmax_backward_data(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + 
def sample_inputs_native_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Adapt the generic batch_norm samples to torch.native_batch_norm.

    The args are reordered to (weight, bias, running_mean, running_var,
    training, momentum, eps), with kwargs lowered to positional defaults.
    Zero-numel inputs are dropped because native_batch_norm rejects them
    (IndexError: Dimension out of range).
    """
    for sample in sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
        if sample.input.numel() == 0:
            continue
        running_mean, running_var, weight, bias = sample.args[:4]
        opts = sample.kwargs
        yield SampleInput(
            sample.input,
            args=(weight, bias, running_mean, running_var,
                  opts.get('training', True),
                  opts.get('momentum', 0.5),
                  opts.get('eps', 1e-5)))
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for relu: plain tensors from 0-D scalar up to 3-D."""
    for shape in ((), (S,), (S, S), (S, M, S)):
        yield SampleInput(make_tensor(shape, device=device, dtype=dtype,
                                      requires_grad=requires_grad))
def sample_kwargs_prelu_scalar_weight(device, dtype, input):
    """Build matching torch/NumPy kwarg dicts holding a random scalar prelu weight.

    Returns ``(torch_kwargs, numpy_kwargs)``; the NumPy copy is taken on CPU.
    NumPy has no bfloat16, so for that dtype the reference copy falls back to
    float32.
    """
    torch_weight = torch.rand(tuple(), device=device, dtype=dtype)
    if dtype == torch.bfloat16:
        host_copy = torch_weight.to(dtype=torch.float32, device="cpu")
    else:
        host_copy = torch_weight.cpu()
    return ({'weight': torch_weight}, {'weight': host_copy.numpy()})
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for torch.norm's Frobenius path: default ord, explicit 'fro',
    and 'fro' with an explicit dim list."""
    def matrix():
        return make_tensor((S, S), device=device, dtype=dtype,
                           requires_grad=requires_grad)

    yield SampleInput(matrix(), args=(), name='default')
    yield SampleInput(matrix(), args=('fro',), name='fro_default')
    yield SampleInput(matrix(), args=('fro', [0, 1]), name='fro')
def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs):
    """Samples for torch.equal over matching and broadcastable shape pairs.

    ``broadcasts_input`` is set whenever the lhs shape is not already the
    broadcast result; identical-shape pairs also yield an exact-copy rhs.
    """
    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shape_pairs = (
        ((), ()),
        ((S,), ()),
        ((), (S,)),
        ((S, 1), (S,)),
        ((M, S), ()),
        ((S, S), (S, S)),
    )

    for lhs_shape, rhs_shape in shape_pairs:
        lhs = make_arg(lhs_shape)
        rhs = make_arg(rhs_shape)
        needs_broadcast = lhs_shape != torch.broadcast_shapes(lhs_shape, rhs_shape)
        yield SampleInput(lhs, args=(rhs,), broadcasts_input=needs_broadcast)
        if lhs_shape == rhs_shape:
            # Exact-equality case: compare against a detached copy of lhs.
            yield SampleInput(lhs, args=(lhs.clone().detach_(),))
def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs):
    """Generic elementwise-binary samples plus add/sub-specific ``alpha`` cases.

    Bool tensors get boolean alphas (True/False); every other dtype gets a
    positive alpha of 2 and a negative alpha (fractional for float/complex,
    integral otherwise).
    """
    yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)

    def operand_pair():
        make = partial(make_tensor, (S, S), device=device, dtype=dtype,
                       requires_grad=requires_grad)
        return make(**op.lhs_make_tensor_kwargs), make(**op.rhs_make_tensor_kwargs)

    is_bool = dtype is torch.bool

    lhs, rhs = operand_pair()
    yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True if is_bool else 2})

    lhs, rhs = operand_pair()
    if is_bool:
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False})
    else:
        neg_alpha = -3.125 if (dtype.is_floating_point or dtype.is_complex) else -3
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha})
== end + (1, 1, 1), + (1, 1, -1), + # divides evenly + (0, -8, -4), + (1, 5, 2), + # bool + (False, True, True), + # default step + (0, 1, None), + # default start + (None, 3, None), + ) + + def to_float(start, end, step): + start = start + 0.1 if start is not None else None + end = end + 0.1 + step = float(step) if step is not None else None + return start, end, step + + float_samples = ( + # includes endpoint + (0., -8. - 1e-6, -4.), + (1., 5. + 1e-6, 2.), + (0., -8., -4.), + (1., 5., 2.), + *(to_float(start, end, step) for (start, end, step) in int_samples), + ) + + large_samples = ( + (0, 10000, None), + ) + + samples = int_samples + float_samples + if dtype not in (torch.int8, torch.uint8): + samples += large_samples + + for start, end, step in samples: + if start is None: + assert step is None + # Pass end as positional arg + yield SampleInput(end, kwargs={"dtype": dtype, "device": device}) + # (Similar to) calling torch.arange(end=3) + yield SampleInput(0, kwargs={"end": end, "dtype": dtype, "device": device}) + elif step is None: + yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(2) + yield SampleInput(1, args=(3, 1)) + +def sample_inputs_randn(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + shapes = ( + (M,), + (S, S) + ) + + for shape in shapes: + yield SampleInput(input=shape, kwargs=dict(dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((S, S), 0, 5), + ((S, S, S), -2, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + +def error_inputs_normal(op, device, **kwargs): + t = torch.zeros([10], 
device=device) + invalid_std = -1 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"normal expects std >= 0.0, but found std {invalid_std}", + ) + +def sample_inputs_cauchy(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.5), + ((S, S), 0, 1), + ((S, S, S), -2, 1), + ) + for shape, median, gamma in samples: + yield SampleInput(make_arg(shape), args=(median, gamma)) + + +def error_inputs_cauchy(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_scale = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_scale,)), + error_type=RuntimeError, + error_regex=fr"cauchy_ expects sigma > 0.0, but found sigma={invalid_scale}", + ) + + +def sample_inputs_exponential(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0.5), + ((S, S), 1), + ((S, S, S), 1.5), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_exponential(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_rate = 0 + yield ErrorInput( + SampleInput(t, args=(invalid_rate,)), + error_type=RuntimeError, + error_regex=fr"exponential_ expects lambda > 0.0, but found lambda={invalid_rate}", + ) + + +def sample_inputs_geometric(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0.2), + ((S, S), 0.5), + ((S, S, S), 0.8), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_geometric(op, device, **kwargs): + t = torch.zeros([10], device=device) + neg_prob = -1 + yield ErrorInput( + SampleInput(t, args=(neg_prob,)), + error_type=RuntimeError, + error_regex=fr"geometric_ expects p to be in \(0, 1\), but got 
p={neg_prob}", + ) + + +def sample_inputs_log_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.25), + ((S, S), 0.5, 1), + ((S, S, S), 0, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + + +def error_inputs_log_normal(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_std = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"log_normal_ expects std > 0.0, but found std={invalid_std}", + ) + + +def sample_inputs_uniform(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), -100, 100), + ((S, S), 0, 1), + ((S, S, S), 1, 2), + ) + for shape, hi, lo in samples: + yield SampleInput(make_arg(shape), args=(hi, lo)) + +def sample_inputs_ones_zeros(op, device, dtype, requires_grad, **kwargs): + # this is a bit messy, as we want the args to be tuples + # so if we pass size as a tuple, we have a tuple containing a tuple + sizes = ( + (M,), + (S, S), + ) + for size in sizes: + yield SampleInput(size, kwargs={'dtype': dtype, 'device': device}) + +def sample_inputs_full(op, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + sizes = ( + (M,), + (S, S), + ) + fill_values = [get_val(dtype), get_val(torch.int)] + + for size, fill_value in product(sizes, fill_values): + yield SampleInput(size, fill_value, dtype=dtype, device=device) + + +def error_inputs_uniform(op, device, **kwargs): + t = torch.zeros([10], device=device) + yield ErrorInput( + SampleInput(t, args=(3, -1)), + error_type=RuntimeError, + error_regex=r"uniform_ expects to return a \[from, to\) range, but found from=3 > to=-1", + ) + + +def error_inputs_linspace(op, device, **kwargs): + yield 
ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative') + yield ErrorInput( + SampleInput(0, args=(3, 1.)), + error_type=TypeError, + error_regex="received an invalid combination of arguments - got \\(int, int, float", + ) + yield ErrorInput( + SampleInput(torch.tensor([1, 1], device=device), args=(torch.tensor([3, 3], device=device), 1)), + error_type=RuntimeError, + error_regex="only supports 0-dimensional start and end tensors" + ) + + +def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 50) + nsteps = (0, 1, 50) + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)] + for start, end, nstep in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1)) + + +def sample_inputs_linspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 50) + nsteps = (0, 1, 50) + is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device, requires_grad=False) + + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps, is_start_end_tensors)) + [(0, 7, 50, (True, True))] + for start, end, nstep, (is_start_tensor, is_end_tensor) in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + + tensor_options = {"dtype": dtype, "device": device} + if is_start_tensor: + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if is_end_tensor: + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1)) + + +def 
sample_inputs_logspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) + for start, end, nstep, base in product(starts, ends, nsteps, bases): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_logspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) 
+ is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device) + for start, end, nstep, base, (is_start_tensor, is_end_tensor) in product(starts, ends, nsteps, bases, is_start_end_tensors): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + + tensor_options = {"dtype": dtype, "device": device} + + if (is_start_tensor): + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if (is_end_tensor): + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Creates additional inputs to test the rtol, atol, and equal_nan params + rtols = [0., 1e-7] + atols = [0., 1e-7] + equal_nans = [False, True] + + products = product(rtols, atols, equal_nans) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for rtol, atol, equal_nan in products: + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs, args=(rhs,), + kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) + + +def error_inputs_isclose(op, device, **kwargs): + make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}), + error_type=RuntimeError, + error_regex='rtol must be greater than or 
equal to zero') + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}), + error_type=RuntimeError, + error_regex='atol must be greater than or equal to zero') + + +def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((1, 2))) + yield SampleInput(make_arg((2,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + first_shape, second_shape = (S, M), (M, S) + + yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) + + if dtype.is_complex: + yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) + + # Matmul of empty matrices + yield SampleInput(make_arg((0, S)), args=(make_arg(S, M),)) + yield SampleInput(make_arg((S, 0)), args=(make_arg(0, M),)) + + +def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) + beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) + tests_list = [ + ((2, 3), (2, 2), (2, 3), False), + ((3, 3), (3, 3), (3, 3), False), + ] + tests_with_lhs_broadcasting = [ + ((1,), (2, 2), (2, 3), True), + ((), (2, 2), (2, 3), True), + ] + test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] + + kwargs = dict(alpha=alpha_val, beta=beta_val) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape_a, shape_b, shape_c, broadcasts_input in test_cases: + yield SampleInput( + make_arg(shape_a), + make_arg(shape_b), + make_arg(shape_c), + **kwargs, + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + 
shape = (3, 3) + yield SampleInput( + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + make_arg(shape), + **kwargs, + ) + yield SampleInput( + make_arg(shape), + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + **kwargs, + ) + # addmm of empty matrices + if dtype.is_floating_point: + yield SampleInput(make_arg(S, M), make_arg(S, 0), make_arg(0, M), **kwargs) + # empty matmul with broadcastable input + yield SampleInput(make_arg(M), make_arg(S, 0), make_arg(0, M), **kwargs).with_metadata(broadcasts_input=True) + +def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha = 2 + 3j if dtype.is_complex else 0.6 + beta = 1 + 2j if dtype.is_complex else 0.2 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C + for m, n, k in itertools.product([0, 5], repeat=3): + yield SampleInput( + torch.eye(m, n, device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + make_arg((k, n)), + alpha=alpha, + beta=beta, + ) + +def sample_inputs_sparse_mm_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + reductions = ["sum", "mean", "amax", "amin"] + for m, k, reduce in product([5, 7], [3, 11], reductions): + yield SampleInput( + torch.eye(m, m) + .to(device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + reduce, + ) + + +def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, M), make_arg(M)) + +def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, 
dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(M, S, M), make_arg(M, M, S)) + +def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + yield SampleInput(make_arg((S, )), make_arg((S, ))) + if dtype.is_complex: + # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) + # is tested in test_conj_view (which tests operations with only conjugated input tensor + # -- not conjugated arg tensors) + yield SampleInput(make_arg((S, )), make_arg_conj((S, ))) + + +def error_inputs_dot_vdot(op_info, device, is_ref=False, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + if not is_ref: + yield ErrorInput(SampleInput(make_input(1), args=(make_input(3, dtype=torch.float16),)), + error_regex='dot : expected both vectors to have same dtype') + yield ErrorInput(SampleInput(make_input(1, 1), args=(make_input(3),)), + error_regex='1D tensors expected') + yield ErrorInput(SampleInput(make_input(9), args=(make_input(3),)), + error_regex='inconsistent tensor size') + if device != "cpu" and not is_ref: + yield ErrorInput(SampleInput(make_input(3), args=(make_input(3, device="cpu"),)), + error_regex='Expected all tensors to be on the same device') + + +def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = (((S,), (S, M), (M,), 1, 1, False), + ((S,), (S, M), (M,), 0.2, 0.6, False), + ) + + test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), + ((1,), (S, M), (M,), 0.2, 0.6, True), + ((), (S, M), (M,), 1, 1, True), + ((), (S, M), (M,), 0.2, 0.6, True), + ) + + cases = test_cases + test_cases_with_broadcast + + # addmv performs: beta * M + 
alpha * (mat @ vec) + for size, mat, vec, beta, alpha, broadcasts_input in cases: + yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) + +def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting + test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, True), + ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + + for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: + if dtype.is_complex: + beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) + +def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + test_cases = [(((S, S), (S, S), (S, S)), False), + (((S, S), (S, 1), (1, S)), False), + (((1,), (S, S, 1), (1, S)), True), + (((), (), ()), False), + (((S, S), (), ()), True), + (((), (S, S, 1), (1, S)), True) + ] + + for input_args, broadcasts_input in test_cases: + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = 
tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput(*args).with_metadata(broadcasts_input=broadcasts_input) + + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput( + *args, value=3.14 if dtype.is_floating_point or dtype.is_complex else 3 + ).with_metadata(broadcasts_input=broadcasts_input) + +def reference_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_addcmul_addcdiv( + op_info, device, dtype, requires_grad, **kwargs) + + # type promotion cases + supported_dtypes = op_info.supported_dtypes(device) + make_arg = partial(make_tensor, device=device, requires_grad=requires_grad) + + types = ( + (torch.float64, torch.complex128), + (torch.bfloat16, torch.float32), + ) + + values = ( + None, + True, False, + 3.14, 3, + 1.0, 1, + 0.0, 0, + -3.14, -3, + 3.14 + 2.71j, + ) + + for (type2, type3), value in product(types, values): + if (type2 not in supported_dtypes or + type3 not in supported_dtypes): + continue + + # RuntimeError: value cannot be converted without overflow + if (type(value) is complex and + type2 is not torch.complex128): + continue + + arg1 = make_arg([5, 5], dtype=dtype) + arg2 = make_arg([5, 5], dtype=type2) + arg3 = make_arg([1, 5], dtype=type3) + + # TypeError: addcdiv(): argument 'value' must be Number, not NoneType + if value is not None: + yield SampleInput(arg1, args=(arg2, arg3), kwargs=dict(value=value)) + else: + yield SampleInput(arg1, args=(arg2, arg3)) + +def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): + test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, 
True), + ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta, + alpha=alpha + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + shapes = [(S, S, S), (S, M, S), (S, S, M)] + args = tuple(make_arg(s) for s in shapes) + yield SampleInput( + args[0].transpose_(-1, 1), + args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), + args[2].transpose(-1, 1).conj().requires_grad_(requires_grad), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ) + +# TODO: add reduction kwargs +def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (S,), + (S, S), + ) + + for shape in shapes: + # Produce one with weight and one without. 
+ yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) + yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), + kwargs={'weight': _make_tensor(shape, requires_grad=False)}) + +def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None + ) + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M)) + + yield SampleInput(make_arg(), make_arg(S), make_arg(M)).with_metadata(broadcasts_input=True) + + if dtype.is_complex: + alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j + elif dtype.is_floating_point: + alpha, beta = 0.2, 0.6 + else: + alpha, beta = 2, 3 + + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M), beta=beta, alpha=alpha) + + yield SampleInput( + make_arg(), + make_arg(S), + make_arg(M), + beta=beta, + alpha=alpha, + ).with_metadata(broadcasts_input=True) + + # These samples fail gradcheck + if dtype.is_floating_point and not requires_grad: + tensor_options = dict(device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput( + torch.tensor([[math.nan]], **tensor_options), + torch.tensor([0.0], **tensor_options), + torch.tensor([0.0], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + + yield SampleInput( + torch.tensor([[0.0]], **tensor_options), + torch.tensor([math.nan], **tensor_options), + torch.tensor([math.nan], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + +def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ((), (S, S, S), (S,)) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1), {}), + ((S,), make_target([], low=0, high=S), {"p": 1}), + ((S,), make_target([1], low=0, high=S), {"p": 2}), + ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), + ((S, M), make_target([S], low=0, high=M), {"margin": -3.14}), + ((M, S), make_target([M], low=0, high=S), {"weight": None}), + ((M, S), make_target([M], low=0, high=S), {"weight": make_weight([S], low=-10., high=10.)}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "none"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "mean"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "sum"}), + ) + + for input_shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1)), + ((S,), make_target([], low=0, high=S)), + ((S,), make_target([1], low=0, high=S)), + ((M, S), make_target([M], low=0, high=S)), + ) + ps = (1, 2) + margins = (0, 7, -3.14) + weights = (False, True) + reductions = (None, "none", "mean", "sum") + + for (input_shape, target), p, margin, weight, reduction in product(inputs, ps, margins, weights, reductions): + input = _make_tensor(input_shape) + weight_shape = [input.size(-1)] if input.ndim > 0 else [1] + weight = make_weight(weight_shape, low=-10., high=10.) 
if weight else None + kwargs = {"p": p, "margin": margin, "weight": weight} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(input, args=(target,), kwargs=kwargs) + + +def error_inputs_multi_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, error_regex=r'inconsistent target size, expected 5 but got \[5, 4\]') + # invalid target dtype + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, error_regex='expected scalar type Long but found Float') + # invalid weight + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(())}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5, 4)}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5,)}), + error_type=RuntimeError, error_regex=r'inconsistent weight size, expected 4 but got \[5\]') + # invalid p + yield 
ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'p': 3}), + error_type=ValueError, error_regex='only p == 1 and p == 2 supported') + + +def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((), (0,), True), + ((S, S), (1,), True), + ((S, S), (1,), False), + ((S, S), (-2,), False), + ((S, S), (0, 1), False), + ) + # Test large inputs to check numerical stability + lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,) + for low in lows: + high = low * 2 if low is not None else None + for shape, dim, keepdim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=low, high=high, + requires_grad=requires_grad) + yield SampleInput(t, dim, keepdim) + +def reference_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs) + + # https://github.com/pytorch/pytorch/issues/91843 + t = torch.tensor([20, 30, 100], dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + t = torch.tensor((), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + # tests masking + # https://github.com/pytorch/pytorch/pull/91860#pullrequestreview-1241344073 + t = torch.tensor(float("inf")) + yield SampleInput(t, 0, True) + +def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + inputs = [ + ((), {}), + ((S, S), {}), + ((0, S, 0), {}), + ((S,), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. 
We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), {'dtype': torch.double}), + ((S,), {'device': 'cpu'}), + ((S,), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), {'device': 'cuda'})) + + for shape, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, **kwargs) + +def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) + + # shape + cases = ( + (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in cases: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + +def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + + inputs = ( + ([], make_target([], low=0, high=1), {}), + ([S], make_target([S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "none"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "mean"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "sum"}), + ) + + for shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, 
def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs):
    # NOTE(review): the `def` line of this function begins just before this chunk of the
    # file; the signature is reconstructed from the visible `**kwargs)` tail and the
    # sibling error_inputs_multilabel_margin_loss below -- confirm against the full file.
    """Sample inputs for multilabel_margin_loss: random targets (including -1 labels)
    and hand-written targets with repeated / -1 labels, crossed with each reduction."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False)
    make_target_tensor = partial(torch.tensor, device=device, dtype=torch.long, requires_grad=False)

    inputs = (
        # random tests including -1 target labels
        ([], make_target([], low=-1, high=1)),
        ([S], make_target([S], low=-1, high=S)),
        ([M, S], make_target([M, S], low=-1, high=S)),
        # repeated target labels and -1 (labels after the first -1 are ignored)
        ([], make_target_tensor(-1)),
        ([7], make_target_tensor([2, 0, 6, -1, 4, -1, 6])),
        ([4, 5], make_target_tensor([[4, -1, 0, -1, 2], [0, 0, 4, 1, 4], [-1, 3, -1, 1, 0], [4, 3, 2, 1, 0]])),
    )
    reductions = (None, "none", "mean", "sum")

    for (shape, target), reduction in product(inputs, reductions):
        kwargs = {}
        if reduction is not None:
            kwargs["reduction"] = reduction
        yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs)


def error_inputs_multilabel_margin_loss(op, device, **kwargs):
    """ErrorInputs for multilabel_margin_loss: bad reduction string, empty/invalid
    input shapes, and targets whose size is inconsistent with the input."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex='abc is not a valid value for reduction')
    # invalid input
    yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5, 4),), kwargs={}),
                     error_type=RuntimeError,
                     error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]')
    yield ErrorInput(SampleInput(make_input(0,), args=(make_input(0,),), kwargs={}),
                     error_type=RuntimeError,
                     error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]')
    # invalid target
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(4,),), kwargs={}),
                     error_type=RuntimeError,
                     error_regex=r'inconsistent target size: \[4\] for input of size: \[5, 4\]')
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input((),),), kwargs={}),
                     error_type=RuntimeError,
                     error_regex=r'inconsistent target size: \[\] for input of size: \[5, 4\]')


def get_independent_tensor(tensor):
    """Return a copy of ``tensor`` that shares no storage with it, preserving
    the ``requires_grad`` flag of the original."""
    return tensor.clone().requires_grad_(tensor.requires_grad)


def sample_inputs_randint(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for randint: reuse the *_like samples' shapes, once with only
    ``high`` and once with both ``low`` and ``high``."""
    low = 2
    high = 10

    for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
        sample.kwargs.setdefault('device', device)
        # With high
        yield SampleInput(high, sample.input.shape, *sample.args, **sample.kwargs)
        # With low and high
        yield SampleInput(low, high, sample.input.shape, *sample.args, **sample.kwargs)


def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for randint_like: the *_like samples once with only ``high``
    and once with both ``low`` and ``high`` (on an independent input tensor)."""
    low = 2
    high = 10

    for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
        # With high
        yield SampleInput(
            sample.input,
            high,
            *sample.args,
            **sample.kwargs)
        # With low and high
        yield SampleInput(
            get_independent_tensor(sample.input),
            low,
            high,
            *sample.args,
            **sample.kwargs)


def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for margin_ranking_loss over 0-3D shapes crossed with
    margins {0, 1} and all reductions."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes = (
        (),
        (S,),
        (S, S),
        (S, S, S),
    )

    margins = (0., 1.)
    reductions = ('sum', 'mean', 'none')

    for shape in shapes:
        for margin, reduction in product(margins, reductions):
            kwargs = {'margin': margin, 'reduction': reduction}
            yield SampleInput(_make_tensor(shape),
                              args=(_make_tensor(shape, requires_grad=False),
                                    _make_tensor(shape, requires_grad=False)),
                              kwargs=kwargs)


def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs):
    """Reference inputs for margin_ranking_loss: the regular samples plus NaN
    propagation, inf handling, and broadcasting cases."""
    yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs)
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for reduction in ('sum', 'mean', 'none'):
        if dtype.is_floating_point:  # only supports ints and floats
            # NaN propagation: each of the three tensors carries one NaN
            inp1 = make_input((10, ))
            inp1[2] = float('nan')
            inp2 = make_input((10, ))
            inp2[4] = float('nan')
            target = make_input((10, ))
            # Fix: was `inp2[9] = float('nan')`, which put a second NaN into inp2
            # and left target NaN-free, contradicting the per-tensor pattern above.
            target[9] = float('nan')
            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})

            # Inf handling: each of the three tensors carries one inf
            inp1 = make_input((10, ))
            # Fix: was `inp2[1] = float('inf')` -- a dead store into the previous
            # block's inp2 that was discarded by the reassignment on the next line.
            inp1[1] = float('inf')
            inp2 = make_input((10, ))
            inp2[4] = float('inf')
            target = make_input((10, ))
            # Fix: was `inp2[7] = float('inf')`; the inf belongs in target.
            target[7] = float('inf')
            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})

        # Broadcasting
        inp1 = make_input((5, 2))
        inp2 = make_input((5, 1))
        target = make_input((1, 2))
        yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})


def error_inputs_margin_ranking_loss(op, device, **kwargs):
    """ErrorInputs for margin_ranking_loss: bad reduction string and
    non-broadcastable input shapes."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction value.
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex='is not a valid value')
    # invalid input shapes
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)),
                     error_regex='margin_ranking_loss : All input tensors should')
def sample_inputs_new_fns(self, device, dtype, requires_grad, *, is_strided=False, **kwargs):
    """Sample inputs for the Tensor.new_* factory functions; when ``is_strided``
    the strides column is also passed to the op."""
    # input_shape, output_shape, strides, kwargs
    # lengths of output_shape and strides must be equal
    inputs = [
        ((), (), (), {}),
        ((S, S), (2, 0), (3, 4), {}),
        ((0, S, 0), (3, 2, 2), (1, 2, 3), {}),
        ((S,), (2, 3), (7, 8), {'dtype': dtype, 'device': device}),
        # Hard-code some dtypes/devices. We want to test cases where the
        # (dtype, device) is different from the input's (dtype, device)
        ((S,), (10,), (S,), {'dtype': torch.double}),
        ((S,), (1, 1, 12), (S, L, M), {'device': 'cpu'}),
        ((S,), (2, 2, 2), (L, M, S), {'dtype': torch.double, 'device': 'cpu'}),
    ]
    if torch.cuda.is_available():
        inputs.append(((S,), (7, 2), (3, 4), {'device': 'cuda'}))

    for input_shape, output_shape, strides, kwargs in inputs:
        t = make_tensor(input_shape, dtype=dtype, device=device,
                        low=None, high=None,
                        requires_grad=requires_grad)
        if is_strided:
            yield SampleInput(t, output_shape, strides, **kwargs)
        else:
            yield SampleInput(t, output_shape, **kwargs)


def sample_inputs_empty_strided(op, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for empty_strided: (shape, strides) pairs up to 3D."""

    inputs = [
        ((), (), {'dtype': dtype, 'device': device}),
        ((S,), (4,), {'dtype': dtype, 'device': device}),
        ((S, S), (2, 1), {'dtype': dtype, 'device': device}),
        ((S, S, S), (2, 0, 1), {'dtype': dtype, 'device': device}),
    ]

    for shape, strides, kwargs in inputs:
        yield SampleInput(shape, strides, requires_grad=requires_grad, **kwargs)


def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for empty: a handful of shapes including zero-sized dims."""
    # shape
    cases = (
        (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1),
    )

    for case in cases:
        yield SampleInput(case, device=device, dtype=dtype, requires_grad=requires_grad)


def sample_inputs_empty_permuted(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for empty_permuted: every physical-layout permutation of
    each test shape."""
    # shape
    cases = (
        (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1),
    )

    for case in cases:
        for layout in itertools.permutations(range(len(case))):
            yield SampleInput(case, layout, device=device, dtype=dtype, requires_grad=requires_grad)


def error_inputs_empty_permuted(op_info, device, **kwargs):
    """ErrorInputs for empty_permuted: wrong layout length, out-of-range dim,
    and duplicate dims in the physical layout."""
    yield ErrorInput(
        SampleInput((2,), args=((0, 1),)),
        error_type=RuntimeError,
        error_regex="Number of dimensions in size does not match the length of the physical_layout"
    )
    yield ErrorInput(
        SampleInput((2,), args=((3,),)),
        error_type=RuntimeError,
        error_regex="Dimension out of range"
    )
    yield ErrorInput(
        SampleInput((2, 3), args=((0, 0),)),
        error_type=RuntimeError,
        error_regex="Duplicate dim not allowed"
    )


def sample_inputs_scalar_tensor(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for scalar_tensor: a few Python scalars."""
    # Not including a scalar tensor in vals because meta tests start failing due to
    # lack of meta support for _local_scalar_dense
    # torch.tensor(2, device=device)
    vals = (-5, 0, 1)

    for item in vals:
        yield SampleInput(item, device=device, dtype=dtype, requires_grad=requires_grad)


def sample_inputs_eye(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for eye: n alone and every (n, m) pair of the size table."""
    # only ints >= 0 are allowed for both arguments, unless m is omitted
    sizes = (None, 0, 1, 2, 3, 4, 7, L, M, S)

    for n, m in product(sizes, sizes):
        if n is None:
            continue

        # TODO: no layout
        _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad}
        if m is None:
            yield SampleInput(n, args=(), kwargs=_kwargs)
        else:
            yield SampleInput(n, args=(m,), kwargs=_kwargs)


def error_inputs_eye(op_info, device, **kwargs):
    """ErrorInputs for eye: negative n (with and without m) and negative m."""
    # TODO: no layout
    _kwargs = {'device': device, 'dtype': torch.float32}

    yield ErrorInput(
        SampleInput(-1, args=(), kwargs=_kwargs),
        error_regex="n must be greater or equal to 0, got -1"
    )

    yield ErrorInput(
        SampleInput(-7, args=(42,), kwargs=_kwargs),
        error_regex="n must be greater or equal to 0, got -7"
    )

    yield ErrorInput(
        SampleInput(0, args=(-3,), kwargs=_kwargs),
        error_regex="m must be greater or equal to 0, got -3"
    )


def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for new_full: the new_fns samples plus a fill value of the
    resulting tensor's dtype."""
    def get_val(dtype):
        return make_tensor([], dtype=dtype, device="cpu").item()

    for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
        # The scalar we are passing to new_full must be the same dtype
        # as the one of the resulting tensor
        use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype
        yield SampleInput(
            sample.input, *sample.args, get_val(use_dtype), **sample.kwargs)


def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for full_like: shapes with fill values, including cases
    whose (dtype, device) differ from the input's."""
    def get_val(dtype):
        return make_tensor([], dtype=dtype, device="cpu").item()

    inputs = [
        ((), get_val(dtype), {}),
        ((S, S), get_val(dtype), {}),
        ((0, S, 0), get_val(dtype), {}),
        ((S,), get_val(dtype), {'dtype': dtype, 'device': device}),
        # Hard-code some dtypes/devices. We want to test cases where the
        # (dtype, device) is different from the input's (dtype, device)
        ((S,), get_val(torch.double), {'dtype': torch.double}),
        ((S,), get_val(dtype), {'device': 'cpu'}),
        ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),
    ]
    if torch.cuda.is_available():
        inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))

    for shape, fill_value, kwargs in inputs:
        t = make_tensor(shape, dtype=dtype, device=device,
                        low=None, high=None,
                        requires_grad=requires_grad)
        yield SampleInput(t, fill_value, **kwargs)
def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for multinomial: (shape, num_samples) with and without
    replacement; weights are made non-negative via ``low=0``."""
    cases = [
        ([3], 3, {}),
        ([10], 3, {}),
        ([3, 10], 3, {}),
        ([3], 3, dict(replacement=False)),
        ([3], 3, dict(replacement=True)),
        ([3, 4], 4, dict(replacement=True)),
        ([3, 4], 4, dict(replacement=False)),
    ]

    for shape, num_samples, kwargs in cases:
        t = make_tensor(shape, dtype=dtype, device=device,
                        low=0, high=None,
                        requires_grad=requires_grad)
        yield SampleInput(t, num_samples, **kwargs)


def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs):
    """Shared driver for normal(): each case entry is either a scalar value or a
    shape list, which is turned into a non-negative tensor."""
    def get_value_or_make_tensor(value_or_shape):
        if isinstance(value_or_shape, list):
            return make_tensor(value_or_shape, dtype=dtype, device=device,
                               low=0, high=None,
                               requires_grad=requires_grad)
        return value_or_shape

    for value_or_mean_shape, value_or_std_shape, kwargs in cases:
        mean = get_value_or_make_tensor(value_or_mean_shape)
        std = get_value_or_make_tensor(value_or_std_shape)
        yield SampleInput(mean, std, **kwargs)


def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for normal() with a tensor mean: matching, scalar-std,
    and broadcasting mean/std combinations."""
    # value_or_size, value_or_size, kwargs
    cases = [
        ([], [], {}),
        ([3], [3], {}),
        ([3, 4, 2], [3, 4, 2], {}),
        ([2, 3], 1.1, {}),
        ([1, 2, 3], [5, 2, 3], {}),  # broadcasting
    ]

    return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs)


def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for normal() with scalar mean and tensor/size second args."""
    yield SampleInput(1.6, 0.3, [2, 3], dtype=dtype, device=device)
    yield SampleInput(1.6, 0.3, [2, 2, 2], dtype=dtype, layout=torch.strided, device=device)
    yield SampleInput(2.7, make_tensor([4, 3], dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad))


def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for bernoulli: probabilities in [0, 1] over a few shapes."""
    shapes = [
        [3],
        [],
        [0, 3],
        [2, 3, 4],
    ]

    for shape in shapes:
        t = make_tensor(shape, dtype=dtype, device=device,
                        low=0, high=1,
                        requires_grad=requires_grad)
        yield SampleInput(t)


def error_inputs_bernoulli(op_info, device, **kwargs):
    """ErrorInput for bernoulli with an internally-overlapping `out` tensor."""
    # more than one element of the written-to tensor refers to a single memory location
    x = torch.rand((1,), device=device).expand((6,))
    err_msg = 'unsupported operation'
    yield ErrorInput(SampleInput(torch.rand_like(x), kwargs={'out': x}),
                     error_regex=err_msg)


def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for logcumsumexp, each shape/dim pair with and without a
    large leading value (to exercise overflow handling)."""
    inputs = (
        ((S, S, S), 0),
        ((S, S, S), 1),
        ((), 0),
    )

    for large_number in (True, False):
        for shape, dim in inputs:
            t = make_tensor(shape, dtype=dtype, device=device,
                            low=None, high=None,
                            requires_grad=requires_grad)

            if large_number and t.dim() > 0:
                t[0] = 10000
            yield SampleInput(t, dim)


def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
    """Single (S, S) sample input for trace."""
    yield SampleInput(
        make_tensor((S, S), dtype=dtype, device=device,
                    low=None, high=None,
                    requires_grad=requires_grad))


def error_inputs_trace(op, device):
    """ErrorInput for trace on a non-matrix (3D) input."""
    yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix")


def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for renorm: (p, dim, maxnorm) triples, including negative
    dim and p=inf."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((S, S, S), (2, 1, 0.5)),
             ((S, S, S), (2, -1, 0.5)),
             ((S, S, S), (1, 2, 3)),
             ((S, S, S), (float('inf'), 2, 0.5)),
             )

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=args)


def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for transpose/swapdims: mixed positive/negative dim pairs,
    including 0-dim and equal-dim (no-op) cases."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((1, 2, 3), (-1, -2)),
             ((1, 2, 3), (-1, 2)),
             ((1, 2, 3), (1, -2)),
             ((1, 2, 3), (1, 2)),
             ((), (0, 0)),
             ((1, ), (0, 0)),
             ((M, M), (0, 1)),
             ((S, S, S), (2, 0)), )

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=args)


def _numpy_ref_transpose(a, dim0, dim1):
    """NumPy reference for transpose: identity on 0/1-D arrays (matching torch),
    otherwise swap the two axes."""
    if a.ndim <= 1:
        return a

    return np.swapaxes(a, dim0, dim1)


def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for adjoint over 2-4D shapes."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((1, 2, 3), (M, M), (S, S, S), (S, M, S), (M, S, M, S))
    return (SampleInput(make_arg(shape)) for shape in shapes)


def sample_inputs_T(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.T on square and rectangular matrices."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((M, M), (M, L))
    return (SampleInput(make_arg(shape)) for shape in shapes)


def error_inputs_T(self, device, has_ndims_error=False):
    """ErrorInputs for Tensor.T on refs that reject ndim not in {0, 2}."""
    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # Deprecated behavior in regular PyTorch, but throws an error in primTorch:
    # https://github.com/pytorch/pytorch/issues/86968
    if has_ndims_error:
        # ndims == 1
        yield ErrorInput(SampleInput(make_arg(M)),
                         error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 '
                                      r'to reverse their shape is not supported\.'))

        # ndims > 2
        yield ErrorInput(SampleInput(make_arg(M, S, L)),
                         error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 '
                                      r'to reverse their shape is not supported\.'))


def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False):
    """
    This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n).
    Their matrix product could be used to generate tensor of shape (*, m, n) of rank k.
    """

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batches = [(), (0, ), (2, ), (1, 1)]
    size = [1, 5, 10]

    for batch, m, n in product(batches, size, size):
        for k in range(min(3, m, n)):
            a = make_arg((*batch, m, k))
            b = make_arg((*batch, n, k))
            yield a, b
def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for svd_lowrank built from low-rank factor pairs, yielded in
    pairs: once with M=None and once with a dense M tensor."""
    # Function that's well defined on the outputs for complex inputs
    def fn(usv):
        U, S, V = usv
        return U @ V.mH, S

    for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad):
        *batch, m, k = a.shape
        n = b.shape[-2]

        # NOTE: since svd_lowrank relies on non rank-revealing SVD,
        # it inherits the problem of unstable behavior with repeated
        # singular values including zeros.
        # Since we want to avoid (repeated) zeros as singular values,
        # we can only use k for q.
        # This issues could be resolved with using a rank-revealing SVD
        # which does not include "zero" singular values.
        yield SampleInput(a, b, q=k, M=None).with_metadata(output_process_fn_grad=fn)

    for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad):
        *batch, m, k = a.shape
        n = b.shape[-2]
        M = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad)
        yield SampleInput(a, b, q=k, M=M).with_metadata(output_process_fn_grad=fn)


def chunk_iter(iterable, size):
    """Yield successive tuples of up to ``size`` items from ``iterable``."""
    it = iter(iterable)
    while True:
        chunk = tuple(islice(it, size))
        if not chunk:
            break
        yield chunk


def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
    """Sample inputs for pca_lowrank derived from the svd_lowrank samples."""
    # we reuse samples from svd_lowrank which come in group of two with
    # kwarg['M'] = None and with kwarg['M'] = <some tensor>
    samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs)
    for s1, s2 in chunk_iter(samples, 2):
        del s1.kwargs['M']
        del s2.kwargs['M']
        s1.kwargs['center'] = False
        s2.kwargs['center'] = True
        yield s1
        yield s2


def np_sinc_with_fp16_as_fp32(x):
    # Wraps numpy's sinc function so that fp16 values are promoted to fp32
    # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
    # at 0 for fp16.
    if x.dtype == np.float16:
        return np.sinc(x.astype(np.float32))
    else:
        return np.sinc(x)


def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for broadcast_to: (input size, target shape) pairs."""
    test_cases = (
        ((S, 1, 1), (S, S, S)),
        ((S, 1, S), (S, S, S)),
        ((S, 1), (S, S, S)),
        ((1,), (S, S, S)),
        ((1, S), (1, 1, S)),
        ((), ()),
        ((), (1, 3, 2)),
    )

    return (
        SampleInput(
            make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            shape,
        ) for size, shape in test_cases)


def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):
    """Sample input for broadcast_tensors with four mutually broadcastable shapes."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)

    for shape, *other_shapes in test_cases:
        yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes))


def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs):
    """Reference inputs for broadcast_tensors: the regular samples plus extra
    shape combinations, contiguous and noncontiguous."""
    yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs)

    m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)

    cases = (
        ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)),
        ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6))
    )

    for a, b, c, d in cases:
        yield SampleInput(m(a), args=(m(b), m(c), m(d)))
        yield SampleInput(n(a), args=(n(b), n(c), n(d)))


def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for block_diag; for complex dtypes, also mixes in a
    non-complex first tensor."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_cases: Tuple[tuple] = (
        ((1, S), (2, S), (3, S),),
        ((S, 1), (S, 2), (S, 3),),
        ((1,), (2,), (3,),),
        ((2, S), (S,))
    )

    for shape, *other_shapes in test_cases:
        yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes))
        # We also want to test mixed complex-non-complex inputs to block_diag
        if dtype == torch.complex32 or dtype == torch.complex64:
            non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64
            make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad)
            yield SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes))
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for cdist: shape pairs (incl. empty and broadcasting batch
    dims) crossed with several p values and both euclid compute modes."""
    small_S = 2
    test_cases = (
        ((S, S, 2), (S, S + 1, 2)),
        ((S, S), (S, S)),
        ((S, S, S), (S, S, S)),
        ((3, 5), (3, 5)),
        ((2, 3, 5), (2, 3, 5)),
        ((1, 2, 3), (1, 2, 3)),
        ((1, 1), (S, 1)),
        ((0, 5), (4, 5)),
        ((4, 5), (0, 5)),
        ((0, 4, 5), (3, 5)),
        ((4, 5), (0, 3, 5)),
        ((0, 4, 5), (1, 3, 5)),
        ((1, 4, 5), (0, 3, 5)),
        # Using S here would make this one test take 9s
        ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
        ((small_S, 1, 1, small_S), (1, small_S, small_S)),
        ((1, 1, small_S), (small_S, 1, small_S, small_S)),
    )

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        # FIXME add an override for JIT and revert 0. back to 0
        # since it's accepted by eager
        for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
            for t1_size, t2_size in test_cases:
                # The args should never be non-contiguous as this is not supported in the backward
                yield SampleInput(make_arg(t1_size), make_arg(t2_size), p, cm)


def _fill_np(a, value):
    """NumPy reference for fill: return a copy of ``a`` filled with ``value``."""
    a = a.copy()
    a.fill(value)
    return a


def _fill_sample_kwargs(device, dtype, input):
    """Kwargs generator for fill: use True for bool tensors, 3 otherwise."""
    if dtype is torch.bool:
        value = True
    else:
        value = 3

    return ({'value': value}, {'value': value})


def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for comparison ops: elementwise-binary samples plus a pair
    of equal-valued tensors."""
    yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)

    # Adds a sample input where both tensors have the same values
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    lhs = make_arg((S, S))
    yield SampleInput(lhs, args=(lhs.clone(),))


def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for stack: tensor lists of varying length over each valid dim."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # shape x number of tensors
    cases = (
        ((3, 4), 1),
        ((1, 2, 1, 4), 3),
        ((0, 1, 0), 2),)

    for shape, num_tensors in cases:
        tensors = []
        for _ in range(num_tensors):
            tensors.append(make_arg(shape))
        for dim in range(-1, len(shape) - 1):
            yield SampleInput(tensors, args=(dim,))


def sample_inputs_chunk_cat(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for _chunk_cat: same-ndim and mixed-ndim tensor lists."""
    # 1. If input tensors have different ndims, dim should be non-negative and be less than the ndims of every input tensors.
    #    If all input tensors have the same ndims, we support both negative and non-negative dim.
    # 2. For wrapped_dim, all tensors should have the same size for 0,...,wrapped_dim-1 dimensions.
    #    No requirements for (wrapped_dim, ...)-th dimension.
    # 3. Expect positive num_chunks
    # 4. Expect non-empty input tensor list and each input tensor should have at least 1 element
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    same_ndim_cases = (
        (
            [
                torch.Size([1, 2, 3]),
                torch.Size([1, 2, 3]),
            ], -1, 5
        ),
        (
            [
                torch.Size([1, 2, 129]),
                torch.Size([1, 2, 297]),
            ], -1, 5
        ),
        (
            [
                torch.Size([1, 2, 3]),
                torch.Size([1, 2, 3]),
            ], 1, 5
        ),
        (
            [
                torch.Size([3, 3, 2, 1]),
                torch.Size([1, 4, 2, 2]),
                torch.Size([2, 1, 3, 3]),
            ], 0, 2
        ),
    )
    for sizes, dim, num_chunks in same_ndim_cases:
        tensors = []
        for size in sizes:
            tensors.append(make_arg(size))
        yield SampleInput(tensors, args=(dim, num_chunks))

    different_ndim_case = [
        torch.Size([2, 3, 3]),
        torch.Size([2, 3, 1, 2]),
        torch.Size([2, 3]),
        torch.Size([2, 3, 2]),
        torch.Size([2, 3, 271]),
    ]
    max_dim, num_chunks = 2, 3
    for dim in range(max_dim):
        tensors = []
        for size in different_ndim_case:
            tensors.append(make_arg(size))
        yield SampleInput(tensors, args=(dim, num_chunks))


def error_inputs_chunk_cat(op_info, device, **kwargs):
    """ErrorInputs for _chunk_cat covering each documented precondition."""
    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # input tensors have different ndims but dim is negative
    sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], -1, 3
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects non-negative dim when input tensors have different ndims',
    )

    # input tensors have different ndims but dim >= ndim of some input tensors
    sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], 1, 3
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects dim < ndim for all input tensors',
    )

    # some tensors have different sizes for 0, ..., dim-1 dimensions.
    sizes, dim, num_chunks = [torch.Size([2, 3, 4]), torch.Size([4, 3])], 1, 3
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors',
    )

    # negative num_chunks
    sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, -1
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects positive num_chunks',
    )

    # zero as num_chunks
    sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, 0
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects positive num_chunks',
    )

    # empty input tensor list
    dim, num_chunks = 0, 1
    yield ErrorInput(
        SampleInput([], args=(dim, num_chunks)),
        error_regex='_chunk_cat expects a non-empty input tensor list',
    )

    # empty input tensor with 0 elements
    sizes, dim, num_chunks = [torch.Size([0,]), torch.Size([3,])], 0, 1
    tensors = [make_arg(size) for size in sizes]
    yield ErrorInput(
        SampleInput(tensors, args=(dim, num_chunks)),
        error_regex='_chunk_cat expects non-empty tensor',
    )
def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for cat/concat: shape pairs with various dims, empty
    tensors, and a channels_last input."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases: Tuple[tuple, tuple, dict] = (  # type: ignore[assignment]
        ((S, S), (S, S), {'dim': -1}),
        ((S, S), (S, S), {'dim': 1}),
        ((M, S), (S, S), {'dim': 0}),  # different shapes
        ((1, 2, 3), (1, 2, 3), {'dim': -2}),
        ((0,), (0,), {'dim': 0}),  # empty tensor
        ((0,), (S, S), {'dim': 1}),  # empty tensor with unempty and dim=1 (special case for legacy_cat_wrap_dim)
        ((0, S), (S, S), {'dim': 0}),
        ((1,), (1,), {})  # dim not passed, fallback to default
    )

    for input_shape1, input_shape2, kwargs in cases:
        yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)

    # from coat_lite_mini
    yield SampleInput([make_arg((2, 2, 2, 2), memory_format=torch.channels_last)], args=(1,),)


def error_inputs_cat(op_info, device, **kwargs):
    """ErrorInputs for cat: overlapping out memory, empty list, mismatched
    sizes/dims, mixed devices, None inputs, 0-dim tensors, and bad out dtype."""

    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # error inputs for more than one element of the written-to tensor refer to a single memory location
    yield ErrorInput(SampleInput([make_arg((S, S)), make_arg((S, S))],
                                 kwargs={'out': make_arg((1, S)).expand((2 * S, S))}),
                     error_regex='unsupported operation')

    # error inputs for empty tensors
    yield ErrorInput(SampleInput([], kwargs={'dim': 1}),
                     error_regex='non-empty list of Tensors')

    # error inputs for different sizes
    yield ErrorInput(SampleInput([make_arg((S, S, L, L)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}),
                     error_regex='Sizes of tensors must match except in dimension')
    yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S, S, L, L))], kwargs={'dim': 1}),
                     error_regex='Sizes of tensors must match except in dimension')

    # error inputs for different dimensions
    yield ErrorInput(SampleInput([make_arg((S - 1, 0)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}),
                     error_regex='Tensors must have same number of dimensions')
    yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S - 1, 0))], kwargs={'dim': 1}),
                     error_regex='Tensors must have same number of dimensions')

    # error inputs for same memory locations
    x = torch.zeros((0), device=device)
    y = torch.randn((4, 6), device=device)

    err_msg = "the written-to tensor refer to a single memory location"

    yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': x}),
                     error_regex=err_msg)
    yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': y}),
                     error_regex=err_msg)

    z = torch.zeros((4, 6), device=device)
    yield ErrorInput(SampleInput((y, z), kwargs={'out': z[:2, :]}),
                     error_regex=err_msg)

    # error inputs for different devices
    if torch.device(device).type == 'cuda':
        x_cuda = make_tensor((3, 3), device=device, dtype=torch.float32)
        y_cpu = make_tensor((3, 3), device='cpu', dtype=torch.float32)
        yield ErrorInput(SampleInput((x_cuda, y_cpu)),
                         error_regex='Expected all tensors to be on the same device')

    # error inputs for different input sizes for more than 2 tensors
    yield ErrorInput(SampleInput([make_arg((L, 1)), make_arg((L, 1, 1)), make_arg((L, 1, 1))]),
                     error_regex='Tensors must have same number of dimensions')

    yield ErrorInput(SampleInput([make_arg((S, 1, M)), make_arg((S, 1, 1)), make_arg((S, M, 1))],
                                 kwargs={'dim': 1}),
                     error_regex='Sizes of tensors must match')

    # error inputs for None input
    yield ErrorInput(SampleInput((make_arg((S, 1, 1)), None)), error_type=TypeError,
                     error_regex='got None')

    # error inputs for zero-dimensional tensors
    yield ErrorInput(SampleInput([make_arg(()), make_arg(())]),
                     error_regex='zero-dimensional.*cannot be concatenated')

    # error inputs for different dtype of out tensors
    d = make_tensor((2, 3), device=device, dtype=torch.double)
    x = make_tensor((2, 3), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(x, kwargs={'out': d}), error_type=TypeError,
                     error_regex='invalid combination of arguments')


def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs):
    """Reference inputs for cat: the regular samples plus noncontiguous
    type-promoting tensors and zero-length 1D special cases."""
    yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Noncontiguous type promoting tensors
    a = make_arg((3, 4, 2))
    b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double)
    c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2)

    yield SampleInput((a, b, c), kwargs={'dim': 1})

    # Special 1D tensor with dim length of 0 case
    a = make_arg((0,))
    b = make_arg((3, 2, 2))

    yield SampleInput((a, b, a))
    yield SampleInput((a, a, a))


def _elementwise_type_promo_np(*args, type_promotion_kind):
    """Compute the numpy dtype that torch's elementwise type promotion would
    produce for ``args`` (ndarrays are viewed as torch tensors first)."""
    def _maybe_torch(x):
        if isinstance(x, np.ndarray):
            return torch.from_numpy(x)
        return x

    flattened = pytree.arg_tree_leaves(*args)
    transformed = tuple(_maybe_torch(a) for a in flattened)
    result_dtype, _ = prims.utils.elementwise_dtypes(
        *transformed,
        type_promotion_kind=type_promotion_kind)
    return torch_to_numpy_dtype_dict[result_dtype]
def _cat_np(input_seq, dim=0):
    """NumPy reference for cat: drops zero-length 1D inputs (torch's legacy
    behavior) and falls back to a type-promoted empty result when nothing remains."""
    inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0))

    if len(inputs) == 0:
        np_dtype = _elementwise_type_promo_np(
            input_seq,
            type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH)
        return np.empty(0, dtype=np_dtype)

    return np.concatenate(inputs, axis=dim)


def _floor_divide_np(a, b):
    """NumPy reference for floor_divide applying torch's type promotion first."""
    dtype = _elementwise_type_promo_np(
        a,
        b,
        type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
    if isinstance(a, np.ndarray):
        a = a.astype(dtype)
    if isinstance(b, np.ndarray):
        b = b.astype(dtype)
    return np.floor_divide(a, b)


def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for hstack/dstack/vstack: 1D and 2D triples."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    tensor_shapes = (
        # First Tensor being 1-D is special
        # case for hstack
        ((S,), (S,), (S,)),
        ((S, S), (S, S), (S, S)),
    )
    for s1, s2, s3 in tensor_shapes:
        tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3))
        yield SampleInput(tensors)


def error_inputs_hstack_dstack_vstack(op, device):
    """ErrorInputs for hstack/dstack/vstack: mismatched ndims and empty list."""
    make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False)
    tensor_shapes = (
        ((S,), (S, S, S, S), (S,)),
    )
    for s1, s2, s3 in tensor_shapes:
        tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3))
        # Different dimension tensor
        yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions")

    # empty tensor list
    yield ErrorInput(SampleInput(()), error_regex="expects a non-empty TensorList")


def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for unbind over several shapes and dims."""
    # Note: we don't do any tests where we unbind along 0-length dims
    # because in that case unbind returns and empty tuple, and that breaks
    # some assumptions in some backward tests in test_ops.py
    shape_dims = (((S,), 0),
                  ((S, S), 0),
                  ((S, S), 1),
                  ((S, S), -1),
                  ((S, 0, S), 0),
                  ((S, S, S), 1),
                  )
    for shape, dim in shape_dims:
        yield SampleInput(make_tensor(shape, dtype=dtype, device=device,
                                      requires_grad=requires_grad),
                          args=(dim,))


def error_inputs_unbind(op_info, device):
    """ErrorInputs for unbind: 0-dim input and out-of-range dim."""
    make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False)
    yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError,
                     error_regex="Dimension specified as 0 but tensor has no dimensions")
    yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError,
                     error_regex="Dimension out of range")


def reference_unbind(t, dim):
    """A numpy implementation of torch.unbind"""
    return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim))


def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for gather, including empty-index and 0-dim cases."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None)
    yield SampleInput(
        make_arg((M, S)),
        0,
        gather_variable((S, S), 1, M, True, device=device))
    yield SampleInput(
        make_arg((M, S)),
        1,
        gather_variable((M, S // 2), 0, S, True, device=device))
    # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
    yield SampleInput(
        make_arg((S,)),
        0,
        torch.tensor([], dtype=torch.uint8, device=device))
    # 0D tensor case
    yield SampleInput(
        make_arg(()),
        0,
        torch.tensor([0], dtype=torch.int64, device=device))
    yield SampleInput(
        make_arg(()),
        0,
        torch.tensor(0, dtype=torch.int64, device=device))


def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o):
    """Fill ``idx`` along ``dim`` with random permutations of range(dim_size),
    taking ``elems_per_row`` entries per row; (m, n, o) bound the other dims."""
    for i in range(1 if dim == 0 else m):
        for j in range(1 if dim == 1 else n):
            for k in range(1 if dim == 2 else o):
                ii = [i, j, k]
                ii[dim] = slice(0, idx.size(dim) + 1)
                idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
else m): + for j in range(1 if dim == 1 else n): + for k in range(1 if dim == 2 else o): + ii = [i, j, k] + ii[dim] = slice(0, idx.size(dim) + 1) + idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] + +def error_inputs_gather(op_info, device, **kwargs): + # src is [1, 2] + # [3, 4] + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + + # idx is [0, 0] + # [1, 0] + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + + # Index should be smaller than self except on dimension 1 + bad_src = make_tensor((1, 1), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), + error_regex="Size does not match at dimension 0") + + # Index must have long dtype + bad_idx = idx.to(torch.int32) + yield ErrorInput(SampleInput(src, args=(1, bad_idx)), + error_regex="Expected dtype int64 for index") + + # TODO: FIXME + # out.dtype must match src.dtype + # Creates new src & idx since SampleInputs can't share tensors + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + out = torch.empty((2, 2), device=device, dtype=torch.float64) + yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), + error_regex="Expected out tensor to have dtype") + + # src and index tensors must have the same # of dimensions + # idx too few dimensions + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor((0, 0), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # src too few dimensions + src = torch.tensor((1, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(0, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # index out of bounds + # 
NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx,)), + error_regex="index 23 is out of bounds for dimension") + + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_take(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +# Error inputs for scatter +def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs): + # Error when self.dtype != src.dtype (and src is not a scalar) + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.double) + 
yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected self.dtype to be equal to src.dtype") + + # Index dtype must be long + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected dtype int64 for index") + + # Index and destination must have the same number of dimensions + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as self tensor") + + # Index and src must have the same number of dimensions when src is not a scalar + src = make_tensor((2, 5, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as src tensor") + + # Index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="index 34 is out of bounds for dimension 0 with size 3") + +def error_inputs_renorm(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError, + error_regex="needs at least 2 
dimensions, got 0 dimensions") + + +def error_inputs_ormqr(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError, + error_regex="input must have at least 2 dimensions") + + # https://github.com/pytorch/pytorch/issues/85218 + tensor_0 = torch.full((5, 0,), 1, device=device) + tensor_1 = torch.full((5,), 1, device=device) + tensor_2 = torch.full((5, 5,), 1, device=device) + bool_3 = True + bool_4 = True + yield ErrorInput(SampleInput(tensor_0, args=(tensor_1, tensor_2, bool_3, bool_4)), error_type=RuntimeError, + error_regex=r"tau.shape\[-1\] must be less than or equal to input.shape\[-1\]") + + +def error_inputs_diag(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + zero_d = torch.randn(1, 1, 1, device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + +def error_inputs_embedding(op_info, device, **kwargs): + indices = torch.rand(2, 2, device=device).long() + weights = [ + torch.tensor(1.0, device=device), + torch.tensor(1.0, device=device).reshape(1, 1, 1), + ] + + for weight in weights: + yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError, + error_regex="'weight' must be 2-D") + + +def error_inputs_t(op_info, device, **kwargs): + yield ErrorInput( + SampleInput(torch.randn(2, 3, 4, 5, device=device)), + error_regex="expects a tensor with <= 2", + ) + + +def error_inputs_multinomial(op_info, device, **kwargs): + x = torch.empty(1, 2, 3, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="prob_dist must be 1 or 2 dim") + + x = torch.empty(1, 2, dtype=torch.long, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="multinomial only supports floating-point dtypes for input") + + x = 
torch.empty(1, 2, dtype=torch.double, device=device) + y = torch.empty(1, 2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), + error_regex="multinomial expects Long tensor out") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(0,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(-1,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3, False,)), + error_regex="cannot sample n_sample > prob_dist") + + x = torch.empty(16777217, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3,)), + error_regex="number of categories cannot exceed") + + inputs = ((1., -1., 1.), (1., inf, 1.), (1., -inf, 1.), (1., 1., nan)) + + err_msg1 = "probability tensor contains either `inf`, `nan` or element < 0" + err_msg2 = "invalid multinomial distribution" + + rep_arg = (False, True) if torch.device(device).type == 'cpu' else (False,) + + for rep in rep_arg: + kwargs = {'num_samples': 2, 'replacement': rep} + + for shape in inputs: + # error case when input tensor contains `inf`, `nan` or negative element + yield ErrorInput(SampleInput(torch.tensor(shape), kwargs=kwargs), + error_regex=err_msg1 if rep is False else err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 1-D input + x = torch.zeros(3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 2-D input + x = torch.zeros(3, 3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution + x[1, :] = 1 + yield ErrorInput(SampleInput(x, kwargs=kwargs), + 
error_regex=err_msg2) + +def error_inputs_gradient(op_info, device, **kwargs): + for dtype in [torch.long, torch.float32, torch.complex64]: + t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype) + + dim = (1, 0) + spacing = [0.1] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected spacing to be unspecified, a scalar ') + + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)), + error_type=RuntimeError, + error_regex='torch.gradient only supports edge_order=1 and edge_order=2.') + + dim = (1, 1) + spacing = 0.1 + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='dim 1 appears multiple times in the list of dims') + + dim = (0, 1) + coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each tensor to be on the same device,') + + yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)), + error_type=IndexError, error_regex='') + + t = torch.tensor([[1], [2], [3]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + + t = torch.tensor([[1, 2], [3, 4]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + +def sample_inputs_rrelu(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_unary( + op_info, device, dtype, requires_grad, op_kwargs=dict(lower=0., upper=1., training=True)) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(S)) + yield 
SampleInput(make_arg(S), training=False) + +def error_inputs_rrelu(op_info, device, **kwargs): + input = make_tensor((S, S), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(input, kwargs={'lower': 0.3, 'upper': 0.1}), + error_regex='Lower bound should be less than or equal to the upper bound') + +def error_inputs_masked_select(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + y = torch.rand((6,), device=device) + mask = torch.tensor([True, False, True, True, False, False], device=device) + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_median(op_info, device, **kwargs): + x = torch.tensor([[[[[[[[[[[[[[[[[[[[[[[[[nan], + [nan]]]]]]]]]]]]]]]]]]]]]]]]], device=device) + if device == 'cuda': + yield ErrorInput(SampleInput(x, kwargs=dict(dim=(-1))), + error_type=RuntimeError, + error_regex='CUDA Tensors cannot have more than 25 dimensions') + else: + return + + +def error_inputs_index_select(op_info, device, **kwargs): + x = torch.rand((1, 6), device=device).expand((2, 6)) + y = torch.rand((3, 6), device=device) + ind = torch.tensor([0, 1], dtype=torch.int64, device=device) + + yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_index_add(op_info, device, **kwargs): + result = torch.tensor([[1., 2.], [4., 5.], [7., 8.]]) + source = torch.tensor([2., 4.]) + + yield ErrorInput(SampleInput(result, args=(0, torch.tensor([0, 2]), source)), + error_type=RuntimeError, + error_regex=r'source tensor shape must match self tensor shape, ' + 
r'excluding the specified dimension. Got self.shape = \[3, 2\] source.shape = \[2\]') + +def error_inputs_logcumsumexp(op_info, device, **kwargs): + dim = 3 + srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)] + for src in srcs: + yield ErrorInput(SampleInput(src, args=(dim,)), + error_type=IndexError, + error_regex='Dimension out of range') + +def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((S, S)), gather_variable((S, S), 1, S, True, device=device), 0) + + # `indices` broadcast + yield SampleInput( + make_arg((S, S)), gather_variable((1, S // 2), 0, S, True, device=device), 1) + + # `self` broadcast + yield SampleInput( + make_arg((1, S)), gather_variable((S, S // 2), 0, S, True, device=device), 1) + + # without `dim` arg + yield SampleInput( + make_arg((S, S)), gather_variable((S, S // 2), 0, S, True, device=device)) + + +def error_inputs_aminmax_amax_amin(op_info, device, is_ref=False, **kwargs): + + # Error Inputs for zero-dim tensors, when 'dim' arg is not provided. 
+ shape = (S, 0, S) + err_msg_amax_amin = "reduction" + err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity" + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax) + + # Error Inputs for tensors with more than 64 dimension + sizes = [1] * 65 + err_msg1 = "only tensors with up to 64 dims are supported" + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}), + error_regex=err_msg1) + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}), + error_regex=err_msg1) + + # Error Inputs for repeated 'dim' + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + dims = [(0, 0), (0, -4)] + err_msg2 = "in the list of dims" + x = torch.randn(S, S, S, S, device=device) + for dim in dims: + yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2) + + # Error Input for illegal dtype + input5 = torch.randn(L, L, dtype=torch.float32, device=device) + max_values = torch.empty(L, dtype=torch.float32, device=device) + min_values = torch.empty(L, dtype=torch.double, device=device) + illegal_values = torch.empty(L, dtype=torch.int, device=device) + + # Unlike regular PyTorch, amax and amin refs don't require input and out + # dtypes to match exactly: + # https://github.com/pytorch/pytorch/pull/87765#pullrequestreview-1162023824 + if is_ref: + err_msg_amax_amin2 = ("Attempting to cast from torch.float32 to out tensor with dtype " + "torch.int32, but this can't be cast because it is not safe!") + else: + err_msg_amax_amin2 = ("Expected the dtype for input and out to match, but got Float " + "for input's dtype and Int for out's dtype.") + err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double 
instead" + + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}), + error_regex=err_msg_amax_amin2) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}), + error_regex=err_msg_aminmax2) + + # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim + err_msg3 = "reduction" + # FIXME: eager and ref impl throw different types of errors + error_type = IndexError if 'refs' not in op_info.name else RuntimeError + yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}), + error_type=error_type, error_regex=err_msg3) + +def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs): + test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S, S), {}), + ((S, S, S), {'dim': 1}), + ((S, S, S), {'dim': 1, 'keepdim': True}), + ((), {'dim': 0}), + ((), {}), + ((), {'dim': 0, 'keepdim': True}), + ((S, 0, S), {'dim': 0}), + ) + + for shape, kwargs in test_cases: + yield SampleInput( + make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), + **kwargs) + +def error_inputs_diff(op_info, device, **kwargs): + t = torch.rand((1, 3), device=device) + n = -1 + yield ErrorInput(SampleInput(t, args=(n, ), kwargs=kwargs), + error_type=RuntimeError, + error_regex=f'order must be non-negative but got {n}') + +def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = ( + ((1,), 0, None, None), + ((S,), 0, None, None), + ((S, 1), 0, None, None), + ((S, 1), 1, None, None), + ((S, S), 0, None, None), + ((S, S), 1, None, None), + ((S, S), 0, (1, S), (2, S)), + ((S, S), 0, None, (2, S)), + ((XS, XS, XS), 1, None, None), + ((XS, XS, XS), 2, None, None), + ((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)), + ((XS, 
XS, XS), 2, (XS, XS, 1), (XS, XS, 1)), + ((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),) + + sample_inputs = [] + for size, dim, size_prepend, size_append in test_cases: + prepend_size = 0 if (size_prepend is None) else size_prepend[dim] + append_size = 0 if (size_append is None) else size_append[dim] + dim_size = size[dim] + prepend_size + append_size + for n in range(dim_size): + input_tensor = make_arg(size) + prepend = make_arg(size_prepend) if size_prepend else None + append = make_arg(size_append) if size_append else None + yield SampleInput(input_tensor, n, dim, prepend, append) + + # add some samples with n > dim_size + yield SampleInput(make_arg((XS, XS, XS)), S + 1, 1) + yield SampleInput(make_arg((XS, XS, XS)), S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS))) + +def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]): + input_tensor = make_arg(size) + weight_tensor = make_arg(size) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = make_arg((bin_ct + 1,)) + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S)) + bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3)) + + for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]): + input_tensor = make_arg(size) + bin_ct = bin_ct_pattern[:size[-1]] + weight_tensor = make_arg(size[:-1]) if weighted else None + + yield 
SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = [make_arg(ct + 1) for ct in bin_ct] + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def error_inputs_histogramdd(opinfo, device, **kwargs): + invalid_bins = [1, 1, 1, 1, 1] + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + msg = "histogramdd: The size of bins must be equal to the innermost dimension of the input." + yield ErrorInput(SampleInput(make_arg(5, 6), invalid_bins), error_regex=msg) + +def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, min, max in product(sizes, [0, -10], [0, 10]): + # construct sample input omitting bins arg + yield SampleInput(make_arg(size), min=min, max=max) + + # construct sample inputs with a few different bins values + for bins in [1, 3, 10]: + yield SampleInput(make_arg(size), bins=bins, min=min, max=max) + +def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for size, weighted in product((S, M), [False, True]): + input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device) + weight_tensor = make_arg((size,)) if weighted else None + + max_val = int(input_tensor.max().item()) + + for minlength in [0, max_val // 2, max_val, 2 * max_val]: + yield SampleInput( + input_tensor, weights=weight_tensor, minlength=minlength) + +def sample_inputs_bucketize(op_info, device, dtype, requires_grad, reference_inputs_mode=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = (((), S), ((S,), S), ((S, S), S), ((S, S, S), S), ((S, 1, S), S), ((S, 0, S), S)) + + if reference_inputs_mode: + 
sizes += (((256,), 128), ((128,), 256), ((32, 32), 11), ((32, 4, 32), 33)) + + for (input_shape, nb), out_int32, right in product(sizes, [False, True], [False, True]): + input_tensor = make_arg(input_shape) + boundaries = make_arg(nb).msort() + + yield SampleInput(input_tensor, boundaries, + out_int32=out_int32, right=right) + +reference_inputs_bucketize = partial(sample_inputs_bucketize, reference_inputs_mode=True) + +def error_inputs_bucketize(opinfo, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg((S, S, S)), make_arg((S, S))), + error_regex="boundaries tensor must be 1 dimension") + +def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # (unsorted tensor size, (input sizes,), is_scalar) + sizes = ( + ((0,), ((0,),), False), + ((M,), ((), (M,), (M, M)), False), + ((0, 0), ((0, 0),), False), + ((M, M), ((M, M),), False), + ((0, 0, 0), ((0, 0, 0),), False), + ((M, M, M), ((M, M, M),), False), + ((L,), ((),), True), + ) + + for (size, input_sizes, is_scalar), noncontiguous, out_int32, right in product( + sizes, [False, True], [False, True], [False, True] + ): + unsorted_tensor = make_arg(size, noncontiguous=noncontiguous) + for input_size in input_sizes: + input = make_arg(input_size, noncontiguous=noncontiguous) + if is_scalar: + input = input.item() + if np.prod(size) == 0: + boundary_tensor = unsorted_tensor + sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous) + else: + boundary_tensor, sorter = torch.sort(unsorted_tensor) + side = "right" if right else "left" + + yield SampleInput(boundary_tensor, input, out_int32=out_int32, right=right) + yield SampleInput(boundary_tensor, input, out_int32=out_int32, side=side) + + yield SampleInput(unsorted_tensor, input, out_int32=out_int32, right=right, 
sorter=sorter) + yield SampleInput(unsorted_tensor, input, out_int32=out_int32, side=side, sorter=sorter) + +def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + test_cases_float = ( + ((S,), None, None, 1), + ((S,), 2., None, 1), + ((S, S), None, None, 2), + ((S, S), [2.0, 2.1], None, 1), + ((S, S), [2.0, 2.1], (0, 1), 1), + ((4, 4, 4), [2., 1.], (0, 1), 2), + ) + for size, spacing, dim, edge_order in test_cases_float: + t = make_arg(size) + yield SampleInput(t, dim=dim, spacing=spacing, edge_order=edge_order) + + test_cases_tensor = ( + ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1), + ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2), + ) + for size, coordinates, dim, edge_order in test_cases_tensor: + t = make_arg(size) + coordinates_tensor_list = [] + for coords in coordinates: + # `coords` will always contain floating point values and Python 3.10 does not support this + # implicit conversion to an integer using `__int__` + # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed + a = torch.tensor(coords, device=device) + coordinates_tensor_list.append(a.to(dtype)) + yield SampleInput(t, dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order) + +def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_args = [ + ([1, 2],), + (slice(0, 3),), + ([slice(0, 3), 1],), + ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],), + ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],), + ([slice(None), slice(None), [0, 3]],), + ([slice(None), [0, 3], slice(None)],), + ([[0, 3], slice(None), slice(None)],), + ([[0, 3], [1, 2], slice(None)],), + ([[0, 3], ],), + ([[0, 3], slice(None)],), + ([[0, 3], Ellipsis],), + ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],), + (index_variable(2, 
S, device=device),), + (mask_not_all_zeros((S,)),), + ] + + for args in test_args: + yield SampleInput(make_arg((S, S, S)), args=args) + + yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],)) + +def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for accumulate in [False, True]: + # Test with indices arg + yield SampleInput( + make_arg((S, S,)), + (index_variable(2, S, device=device),), + make_arg((2, S)), + accumulate=accumulate) + + # Test with mask arg + mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,)) + yield SampleInput( + make_arg((S, S)), (mask, ), make_arg((S,)), accumulate=accumulate) + +def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs): + def small_3d_unique(): + res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + def large_1d_unique(): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + # Test case for large tensor. + yield SampleInput(large_1d_unique()) + + # Test cases for small 3d tensors. 
+ # Imitates legacy tests from test/test_torch.py + dims = range(-3, 3) + flag = [True, False] + for dim, descending, stable in product(dims, flag, flag): + # default schema without stable sort + yield SampleInput(small_3d_unique(), dim, descending) + # schema with stable sort, no CUDA support yet + if torch.device(device).type == 'cpu': + yield SampleInput( + small_3d_unique(), dim=dim, descending=descending, stable=stable) + + # Test cases for scalar tensor + tensor_opt = dict(dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(torch.tensor(1, **tensor_opt)) + yield SampleInput(torch.tensor(1, **tensor_opt), 0) + yield SampleInput(torch.tensor(1, **tensor_opt), 0, True) + + # Test cases for empty tensor + yield SampleInput(torch.tensor((), **tensor_opt)) + yield SampleInput(torch.tensor((), **tensor_opt), 0) + yield SampleInput(torch.tensor((), **tensor_opt), 0, True) + + # Test cases for stable sort + yield SampleInput(small_3d_unique(), stable=True) + yield SampleInput(small_3d_unique(), dim=0, stable=True) + yield SampleInput(small_3d_unique(), dim=0, descending=True, stable=True) + +def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S)) + for x_size in sizes: + # threshold and values args must be numbers + yield SampleInput(make_arg(x_size), make_arg(()).item(), make_arg(()).item()) + +def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for shape, sorted, return_inverse, return_counts, dim in \ + product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]): + # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim + if 0 in shape and 
shape.index(0) is not dim: + continue + + # skip invalid dim args + if dim is not None and (dim < -len(shape) or dim >= len(shape)): + continue + + kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + + # construct a test case with only one distinct value + input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with mixed 0s and 1s + input_t = make_arg(shape, dtype=torch.bool, requires_grad=False)\ + .to(dtype).requires_grad_(requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with many different values + yield SampleInput(make_arg(shape), **kwargs) + +def sample_inputs_unique_consecutive(*args, **kwargs): + for sample_input in sample_inputs_unique(*args, **kwargs): + if not sample_input.kwargs["sorted"]: + sample_input.kwargs.pop("sorted") + yield sample_input + +def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8), (5,)), + ((3, 8, 8), 5), + ((3, 8, 8), 1) + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool2d(op_info, 
device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((1, 8, 8, 8), (5, 7)), + ((2, 8, 8, 8), (None, 7)), + ((1, 8, 4, 3), (5, None)), + ((1, 8, 4, 3), (None, None)), + ((1, 8, 4, 3), (5)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 2") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8, 8, 8), (5, 7, 4)), + ((1, 8, 4, 3, 7), (None, None, None)), + ((1, 8, 4, 3, 7), (1, 1, 1)), + ((3, 3, 8, 8, 6), (5, 7, None)), + ((1, 3, 8, 8, 6), (5, None, 2)), + ((3, 3, 8, 8, 6), (None, 3, 2)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, 
dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 3") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8), (5,)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((3, 4, 4), 3), + ((3, 4, 4), 1) + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + + +def error_inputs_adaptive_max_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="Trying to create tensor with negative dimension") + +def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8), (5, 7)), + # 0 batch size doesn't work, cannot reshape 
tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 4), (2, 3)), + ((2, 4, 4, 4), (None, 3)), + ((2, 4, 4, 4), (1, 1)), + ((1, 4, 4, 3), (3, None)), + ((1, 4, 4, 3), (None, None)), + ((1, 4, 4, 3), (3)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="Trying to create tensor with negative dimension") + + +def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8, 8), (5, 7, 4)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 3, 5), (None, None, None)), + ((1, 4, 4, 3, 5), (1, 1, 1)), + ((3, 3, 4, 4, 6), (2, 3, None)), + ((1, 3, 4, 4, 6), (3, None, 2)), + ((3, 3, 4, 4, 6), (None, 3, 2)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, 
device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="Trying to create tensor with negative dimension") + + +class _TestParamsMaxPoolBase: + + def __init__(self): + self.kwargs = { + 'kernel_size': [3], + 'stride': [2, None], + 'ceil_mode': [True, False], + 'padding': [0, 1], + 'dilation': [1], + 'return_indices': [True, False] + } + + self.shapes = [ + [1, 2, None], # batch + [2], # channels + [3, 6] # signal + ] + + def _gen_shape(self): + for shape in product(*self.shapes): + # shape[0] is None indicates missing batch dimension + if shape[0] is None: + shape = shape[1:] + + yield shape, torch.contiguous_format + # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format + if len(self.shapes) == 4 and len(shape) == 4: + yield shape, torch.channels_last + + def _gen_kwargs(self): + keys = self.kwargs.keys() + for values in product(*self.kwargs.values()): + yield dict(zip(keys, values)) + + def gen_input_params(self): + yield from product(self._gen_shape(), self._gen_kwargs()) + +class _TestParamsMaxPool1d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3,)] + self.kwargs['stride'] += [(2,)] + self.kwargs['padding'] += [(1,)] + self.kwargs['dilation'] += [(1,)] + +class _TestParamsMaxPool2d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2)] + self.kwargs['stride'] += [(2, 1)] + self.kwargs['padding'] += [(1, 1)] + self.kwargs['dilation'] += [(1, 2)] + + 
self.shapes.append([6]) + +class _TestParamsMaxPool3d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2, 3)] + self.kwargs['stride'] += [(2, 1, 2)] + self.kwargs['dilation'] += [(1, 2, 1)] + + self.shapes.append([6]) + self.shapes.append([5]) + +def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + params_generator_type_dict = { + 'nn.functional.max_pool1d': _TestParamsMaxPool1d, + 'nn.functional.max_pool2d': _TestParamsMaxPool2d, + 'nn.functional.max_pool3d': _TestParamsMaxPool3d, + 'max_pool2d_with_indices_backward': _TestParamsMaxPool2d, + } + + params_generator = params_generator_type_dict[op_info.name]() + for (shape, memory_format), kwargs in params_generator.gen_input_params(): + arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad) + yield SampleInput(arg, kwargs=kwargs) + +def max_pool2d_backward(*args, kernel_size=(), stride=(), padding=(0,), dilation=(1,), ceil_mode=False, **kwargs): + out, indices = torch.nn.functional.max_pool2d_with_indices( + *args, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=True) + grad_out = torch.ones_like(out) + if stride is None: + stride = kernel_size + out_b = torch.ops.aten.max_pool2d_with_indices_backward.default( + grad_out, *args, kernel_size, stride, padding, dilation, ceil_mode, indices) + return out_b + +def error_inputs_max_pool1d(op_info, device, **kwargs): + # Toggle requires_grad because `max_pool1d` has different path + # based on whether `requires_grad` is set or not. 
+ for requires_grad in (True, False): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=requires_grad) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > ((kernel_size - 1) * dilation + 1) / 2, when dilation is not default + yield ErrorInput(SampleInput(x, + kwargs={'kernel_size': 3, 'dilation': 2, 'stride': 1, 'padding': 3, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for input tensor + error_msg = r'Expected 2D or 3D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((), requires_grad=requires_grad), kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input + yield ErrorInput(SampleInput(torch.tensor([], device=device, requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: unbatched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((0, 10), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: batched input with 0 sized non-batch dims. 
+ yield ErrorInput(SampleInput(make_arg((1, 10, 0), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input with stride=0 + error_msg = 'stride must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 0}), + error_regex=error_msg) + + # error inputs for empty input with dilation=0 + error_msg = 'dilation must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), + kwargs={'kernel_size': 1, 'stride': 1, 'padding': 0, 'dilation': 0}), + error_regex=error_msg) + + # error inputs for invalid output size + error_msg = 'Invalid computed output size: -2' + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), + kwargs={'kernel_size': 5, 'stride': 1, 'padding': 0, 'dilation': 1}), + error_regex=error_msg) + + # error inputs when kernel_size=0 + error_msg = 'kernel_size must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 0}), + error_regex=error_msg) + + # error inputs for strides > 0 + error_msg = 'stride must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 0}), + error_regex=error_msg) + + +def error_inputs_max_pool2d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size : int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be 
at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size : tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. + err_msg = r'Expected 3D or 4D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((1, 0, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 10, 0)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def error_inputs_max_pool3d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49, 50)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size: int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size: tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. 
+ err_msg = r'Expected input\'s non-batch dimensions to have positive length' + yield ErrorInput(SampleInput(make_arg((0, 1, 2, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched inputs with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 0, 1, 2)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((2, 1, 4, 5), {'p': 1., 'dim': 2}), + ((2, 3, 4, 5), {'p': 2., 'dim': 1}), + ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}), + ((1, 3, 4, 5), {'p': -1., 'dim': 1}), + ((1, 3, 4, 5), {'p': 0., 'dim': -1}), + ((), {'p': 1.2, 'dim': 0}), + ((2, 3, 4, 5), {}), + ((2, 3, 4, 5), {'eps': 1e-4})) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), kwargs=kwargs) + + +def complex_conv(fn, input_size, weight, grad_output, stride, padding, dilation, groups): + # conv(W, x, b) = conv(Wr, xr, br) - conv(Wi, xi, 0) + i(conv(Wi, xr, bi) + conv(Wr, xi, 0)) + # a = conv(Wr, xr, br), + # b = conv(Wi, xi, 0), + # c = conv(Wr + Wi, xr + xi, br + bi) + # conv(W, x, b) = a - b + i(c - a - b) + + grad_output_ = torch.view_as_real(grad_output) + grad_output_r = grad_output_[..., 0] + grad_output_i = grad_output_[..., 1] + + weight_ = torch.view_as_real(weight) + weight_r = weight_[..., 0] + weight_i = weight_[..., 1] + + a = fn(input_size, weight_r, grad_output_r, stride, padding, dilation, groups) + b = fn(input_size, weight_i, grad_output_i, stride, padding, dilation, groups) + c = fn(input_size, weight_r + weight_i, grad_output_r + grad_output_i, stride, padding, dilation, groups) + + return (a - b) + 1j * (c - a - b) + + +def conv_transpose_ref(input, weight, bias, stride=1, padding=0, + output_padding=0, dilation=1, groups=1, + fn=None): + # Derivative of `conv` is 
`conv_transpose`. + # To verify the correctness of `conv_transpose`, + # we rely `torch.nn.grad` implementation (which is tested in test_nn.py) + # for floating dtypes. + + assert fn is not None + + grad_fn_map = {torch.nn.functional.conv_transpose1d: torch.nn.grad.conv1d_input, + torch.nn.functional.conv_transpose2d: torch.nn.grad.conv2d_input, + torch.nn.functional.conv_transpose3d: torch.nn.grad.conv3d_input} + batched_dim_map = {torch.nn.functional.conv_transpose1d: 3, + torch.nn.functional.conv_transpose2d: 4, + torch.nn.functional.conv_transpose3d: 5} + + # Input for `ref` is ndarray. + input, weight = torch.from_numpy(input), torch.from_numpy(weight) + + is_batched = len(input.shape) == batched_dim_map[fn] + if not is_batched: + input = input.unsqueeze(0) + + if bias is not None: + bias = torch.from_numpy(bias) + unsqueeze_dims = input.ndim - 2 + for _ in range(unsqueeze_dims): + bias = bias.unsqueeze(1) + + grad_output = input + # Get the input shape for grad_fn. + conv_transpose_output = fn(grad_output.to('meta'), weight.to('meta'), None, + stride=stride, padding=padding, output_padding=output_padding, + groups=groups, dilation=dilation) + input_size = conv_transpose_output.shape + + grad_fn = grad_fn_map[fn] + if weight.dtype.is_complex: + out = complex_conv(grad_fn, input_size, weight, grad_output, stride, padding, dilation, groups) + else: # Floating + out = grad_fn(input_size, weight, grad_output, stride, padding, dilation, groups) + + if bias is not None: + out = out + bias + + return out.squeeze(0) if not is_batched else out + + +def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4), (3, 3, 3), (3,), + 
{'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}), + ((2, 2, 4), (2, 2, 4), (4,), + {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}), + ((1, 1, 4), (1, 1, 4), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}), + ((1, 1, 4), (1, 2, 3), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5), (4, 8, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}), + ((2, 2, 4, 4), (2, 2, 4, 5), (4,), + {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}), + ((1, 1, 4, 5), (1, 1, 4, 3), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 1, 4, 3), (1, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((2, 4, 4, 4), (4, 1, 3, 3), None, {'groups': 4}), + ((1, 2, 5, 5), (2, 4, 3, 3), None, {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield 
SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + +def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,), + {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}), + ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,), + {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}), + ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}), + ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias, + # and a dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}), + ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}), + ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}), + 
((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}), + # With defaults + ((1, 4, 5), (3, 4, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4)), args=(make_int_arg((1, 1, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # 
yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2)), make_arg((1,)))), + error_regex="weight should have at least three dimensions") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def error_inputs_conv2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((2, 4, 4)), args=(make_int_arg((3, 2, 3, 3)), make_arg((3,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((2, 4, 4)), 
args=(make_arg((3, 2, 3, 3)), make_complex_arg((3,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 2, 2, 3)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2, 4)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 2)), args=(make_arg((1, 1, 2, 5)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 1, 3, 2)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 3-dimensional input for 3-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for groups the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 5)), args=(make_arg((2, 2, 1, 
4)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 4, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, groups, dilation) + cases: Tuple = ( + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'groups': 1}), + ((2, 4, 8, 8), (2, 2, 3, 3), (2,), + {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 2, 4, 3), (4, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'groups': 1}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': "valid"}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 1, 'padding': "same", 'dilation': 3}), + # Below are the group related samples from common_nn.py + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}), + ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}), + ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}), + # With defaults + ((1, 4, 5, 5), (3, 4, 3, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + 
make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv3d(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 1, 4, 4, 4), (1, 1, 1, 1, 1), (1,), {'padding': 'same'}), + ((1, 1, 4, 4, 4), (1, 1, 4, 4, 4), (1,), {'stride': (2, 2, 2)}), + ((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1,), {'dilation': 2}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same', 'dilation': 2}), + ((1, 1, 10, 11, 12), (1, 1, 4, 4, 4), None, {'padding': 'same', 'dilation': 3}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'groups': 3}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'stride': (2, 2, 2), 'dilation': 1, 'groups': 3}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + 
SampleInput(make_int_arg((1, 1, 4, 4, 4)), args=(make_int_arg((1, 1, 2, 2, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 3, 3, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 3, 4, 5)), args=(make_arg((1, 1, 4, 3)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 4-dimensional input for 4-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + 
SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), + error_regex="non-positive groups is not supported") + + # error inputs for padding='same' not supported by strided convolutions + yield ErrorInput( + SampleInput(make_arg((18, 27, 9, 1, 9)), args=(make_arg((9, 9, 9, 1, 9)), + make_arg((9,))), kwargs={'stride': 2, 'padding': 'same', 'groups': 3}), + error_regex="padding='same' is not supported for strided convolutions") + + +def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'eps' : 0.5}), + ((2, 6, 3), 2, {'eps' : -0.5}), + ((1, 3), 1, {'eps' : 1e-5}), + ((0, 2), 1, {'eps' : 1e-5}), + ((S, S, S), 1, {'eps' : 0.5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(make_arg(input_shape), num_groups, **kwargs) + + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=(1,)) + +def reference_inputs_group_norm(op_info, device, dtype, 
requires_grad, **kwargs): + yield from sample_inputs_group_norm( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((20, 6, 10, 10), 3, {'eps' : 1e-5}), + # equivalent with InstanceNorm + # GroupNorm(C, num_groups=C) == InstanceNorm(num_features=C) + ((20, 6, 10, 10), 6, {'eps' : 1e-5}), + # equivalent with LayerNorm + # GroupNorm(C, num_groups=1, affine=False) == LayerNorm(normalized_shape=[C, H, W], elementwise_affine=False) + ((20, 6, 10, 10), 1, {'eps' : 1e-5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + input_tensor = make_arg(input_shape) + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(input_tensor, num_groups, **kwargs) + + +def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'momentum': 0.5, 'eps': 0.6}), + ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}), + ((3, 2, 4), {'momentum': -1.2}), + ((3, 2, 4), {'momentum': 0.0}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ((3, 
2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] + weight = make_arg(channels) + bias = make_arg(channels) + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + new_kwargs = { + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': weight, + 'bias': bias, + **kwargs + } + + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs=new_kwargs + ) + + # Checking for permutations of weights and biases as `None` + # instance_norm assumes that if there's a bias, there's a weight + weights = [channels, None] + biases = [None, None] + + for weight_channels, bias_channels in zip(weights, biases): + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs={ + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': make_arg(weight_channels) if weight_channels is not None else None, + 'bias': make_arg(bias_channels) if bias_channels is not None else None + } + ) + + # Test case for no optional kwargs + yield SampleInput(make_arg((1, 2, 3)), kwargs={}) + + +def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) 
+ bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + + # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs, + # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400 + + # With weight and a `None` bias + # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None)) + + # With `None` weight and bias (tests failing for this, see the link above) + # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,)))) + + +def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape, eps + cases: Tuple[Tuple[int], Tuple[int], float] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), 0.5), + ((2, 2, 3), (2, 3), -0.5), + ((1,), (1,), 1e-5), + ((1, 2), (2,), 1e-5), + ((0, 1), (1,), 1e-5), + ) + + for input_shape, normalized_shape, eps in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, None, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, None, eps), + ) + +def sample_inputs_rms_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # 
type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + +def error_inputs_group_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # check that input has minimum number of dimensions + err_msg1 = "Expected at least 2 dimensions for input tensor but received" + s1 = SampleInput(make_arg(1), args=(1,)) + yield ErrorInput(s1, error_regex=err_msg1) + + # check that the channels dimension is compatible with number of groups + err_msg2 = "Expected number of channels in input to be divisible by num_groups, but got input of shape" + s2 = SampleInput(make_arg((2, 7, 4)), args=(2,)) + yield ErrorInput(s2, error_regex=err_msg2) + +def error_inputs_native_layer_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=(tuple(), None, None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + bias = make_arg((1, 2)) + err_msg3 = "Expected bias to be of same shape as normalized_shape" + s3 = SampleInput( + make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5) + 
) + yield ErrorInput(s3, error_regex=err_msg3) + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + +def error_inputs_rms_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=(tuple(), None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + + +def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, size and a kwarg dict for alpha, beta, and k + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}), + ((1, 6, 3), 2, {'alpha': 3e-05}), + ((1, 6, 3), 2, {'beta': 0.5}), + ((1, 6, 3), 2, {'k': 1.25}), + ((1, 6, 3), 2, {}), + ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ) + + for input_shape, size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs) + +def sample_inputs_hardswish(self, 
device, dtype, requires_grad, **kwargs): + N = 5 + # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ? + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-5, high=5) + return (SampleInput(make_arg((N * 2, N * 2))) for _ in range(1, N)) + +def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4], [8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor = create_tensor(batch_shape + [in_feat]) + weight = create_tensor([out_feat, in_feat]) + if not has_bias: + yield SampleInput(input_tensor, weight) + continue + + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor, weight, bias) + + # 5D tensor, used to crash on MPS, see https://github.com/pytorch/pytorch/issues/114942 + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2)) + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2), create_tensor(4)) + +def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4, 5], [8, 8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor1 = create_tensor(batch_shape + [in_feat1]) + input_tensor2 = create_tensor(batch_shape + [in_feat2]) + weight = create_tensor([out_feat, in_feat1, in_feat2]) + if not has_bias: + yield SampleInput(input_tensor1, input_tensor2, weight) + continue + bias = 
create_tensor([out_feat]) + yield SampleInput(input_tensor1, input_tensor2, weight, bias) + +def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs): + features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for features, batch_shape in itertools.product(features_options, batch_options): + ndim = len(features) + len(batch_shape) + for dim in range(ndim): + input_tensor = create_tensor(batch_shape + features) + dim_size = input_tensor.size(dim) + if dim_size > 0 and dim_size % 2 == 0: + yield SampleInput(input_tensor, dim) + +def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + align_corners_options: Tuple[Any, ...] = (None,) + if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'): + align_corners_options = (True, False, None) + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'nearest-exact': [1, 2, 3], + 'linear': [1], + 'bilinear': [2], + 'bicubic': [2], + 'trilinear': [3], + 'area': [1, 2, 3] + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return tuple([N, C] + ([size] * rank)) + return tuple([size] * rank) + + if mode in ('bilinear', 'bicubic') and dtype == torch.uint8: + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for a more close to typical image processing usage + rank = 2 + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg(shape(270, rank), memory_format=memory_format), + shape(130, rank, False), + scale_factor=None, + mode=mode, + align_corners=False, + ) + + make_arg = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for align_corners in align_corners_options: + for rank in ranks_for_mode[mode]: + yield SampleInput( + make_arg(shape(D, rank)), + shape(S, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + yield SampleInput( + make_arg(shape(D, rank)), + shape(L, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + for recompute_scale_factor in [False, True]: + for scale_factor in [1.7, 0.6]: + yield SampleInput( + make_arg(shape(D, rank)), + size=None, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + +def reference_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', 'bicubic'): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + for aa in [True, False]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + scale_factor=None, + mode=mode, + align_corners=False, + antialias=aa, + ) + +def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'bilinear': [2], + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return torch.Size([N, C] + ([size] * rank)) + return torch.Size([size] * rank) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for rank in ranks_for_mode[mode]: + yield 
SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), size=shape(L, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=1.7) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6) + +def reference_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', ): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide a single sample for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + ) + +def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs): + N = 6 + C = 3 + H = 10 + W = 20 + S = 3 + L = 5 + + input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=None, align_corners=False, scale_factors=[1.7, 0.9]) + yield SampleInput(input_tensor, output_size=None, align_corners=True, scale_factors=[0.8, 1.0]) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=None, scales_w=None) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=1.7, scales_w=0.9) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=True, scales_h=1.7, scales_w=0.9) + +def sample_inputs_gelu(self, device, dtype, 
requires_grad, **kwargs): + N = 5 + for _ in range(1, N): + for approximate in ['none', 'tanh']: + yield SampleInput( + make_tensor((N * 2, N * 2), device=device, dtype=dtype, + requires_grad=requires_grad, low=-3, high=3), + approximate=approximate) + + +def error_inputs_gelu(op, device, **kwargs): + # Tests that gelu errors out when passed an approximation we don't know. + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}), + error_regex="approximate argument must be either") + + +def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs): + inputs = [] + args_for_reduction_with_dim = ( + ((S, S, S), (1,),), + ((S, S, S), (1, True, ),), + ((), (0,),), + ((), (0, True,),), + ) + return ((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *args)) + for input_tensor, args in args_for_reduction_with_dim) + +def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + +def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs): + yield from _generate_reduction_inputs(device, dtype, requires_grad) + # NaN only exists for floating point numbers + if dtype.is_complex or dtype.is_floating_point: + yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad) + yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_nan_reduction(supports_multiple_dims): + # Generates sample inputs for reduction ops that contain the input tensor + # and dim and keepdim kwargs. 
If a reduction op needs to test additional + # args/kwargs then create a separate sample_inputs function + def fn(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_nan_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + yield SampleInput(t.clone().requires_grad_(requires_grad)) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): + yield SampleInput(t.clone().requires_grad_(requires_grad), **kwargs) + + return fn + +def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs): + test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad)) + test_interpolations = ['linear', 'midpoint'] + + for quantiles in test_quantiles: + for t in _generate_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False): + # Interpolation kwarg for now is only supported when providing both dim and keepdim + kwargs.setdefault('dim', 0) + kwargs.setdefault('keepdim', False) + for interpolation in test_interpolations: + kwargs['interpolation'] = interpolation + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles, **kwargs) + +def sample_inputs_reduction_count_nonzero(*args, **kwargs): + """Sample inputs for count_nonzero""" + # count_nonzero does not support keepdim yet + for sample in sample_inputs_reduction(*args, **kwargs): + sample.kwargs.pop('keepdim', None) + yield sample + +def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs): + N = 10 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + return (SampleInput(make_arg((N, N))) for _ in range(1, N)) + +def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, 
**kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((1, 3, 9, 9), 3), + ((1, 3, 9, 9), (4, 4)), + ((1, 3, 9, 9), (6, 6)), + ((2, 3, 9, 9), (3, 3)), + ((1, 1, 4, 4), (2, 2)), + ((1, 2, 6, 6), (4, 4))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((2, 3, 5, 5, 5), (2, 2, 2)), + ((1, 2, 6, 5, 4), 2), + ((1, 2, 5, 6, 5), (2, 3, 2)), + ((1, 2, 6, 6, 6), (2, 3, 2)), + ((1, 1, 7, 6, 7), (2, 3, 4)), + ((1, 1, 4, 5, 4), (2, 2, 1)), + ((1, 1, 8, 7, 6), (4, 3, 2)), + ((0, 1, 4, 5, 4), (2, 2, 1))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3, 2), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, 
**kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2), + ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2), + ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2), + ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2), + ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2), + ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None)) + + for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases: + yield SampleInput(make_arg(input_shape), + args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)) + # Case with just input_shape and kernel_size + yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3))) + +def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, kwargs + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 9), (3,), {}), + ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)), + ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)), + ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)), + ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def sample_inputs_avgpool3d(op_info, 
    # (continuation of an avg_pool3d sample-input generator whose `def` line is above this chunk)
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
        ((2, 3, 3, 4, 4), (2, 2, 2), {}),
        ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
                                  count_include_pad=False, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
                                          count_include_pad=True, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
        ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
                                          count_include_pad=False, divisor_override=2)),
        ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=-2)),
        ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
                                          count_include_pad=True, divisor_override=None)),
        ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=None)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)

def error_inputs_avg_pool1d(op_info, device, **kwargs):
    """Yield ErrorInputs exercising avg_pool1d's padding validation."""
    # error inputs when pad is negative
    x = torch.rand([0, 1, 49], dtype=torch.float32)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}),
                     error_regex='pad must be non-negative')

    # error inputs when pad > kernel_size / 2
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}),
                     error_regex='pad should be at most half of effective kernel size')

def error_inputs_avg_pool2d(op_info, device, **kwargs):
    """Yield ErrorInputs exercising avg_pool2d's padding and divisor validation."""
    # error inputs when pad is negative
    x = torch.rand([0, 1, 49], dtype=torch.float32)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}),
                     error_regex='pad must be non-negative')
    # 2-dimensional kernel
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1}),
                     error_regex='pad must be non-negative')

    # error inputs when pad > kernel_size / 2
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}),
                     error_regex='pad should be at most half of effective kernel size')
    # 2-dimensional kernel
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4}),
                     error_regex='pad should be at most half of effective kernel size')

    # error inputs for zero divisor
    x = torch.zeros(3, 3, 3)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2), 'divisor_override': 0}),
                     error_regex='divisor must be not zero')

def error_inputs_avg_pool3d(op_info, device, **kwargs):
    """Yield ErrorInputs exercising avg_pool3d's padding, divisor and rank validation."""
    # error inputs when pad is negative
    x = torch.rand([0, 1, 49, 50], dtype=torch.float32)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}),
                     error_regex='pad must be non-negative')
    # 3-dimensional kernel
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1}),
                     error_regex='pad must be non-negative')

    # error inputs when pad > kernel_size / 2
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}),
                     error_regex='pad should be at most half of effective kernel size')
    # 3-dimensional kernel
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4}),
                     error_regex='pad should be at most half of effective kernel size')

    # error inputs for zero divisor
    x = torch.zeros(3, 3, 3, 3)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2, 2), 'divisor_override': 0}),
                     error_regex='divisor must be not zero')

    # error inputs for invalid input dimension
    x = torch.rand([0, 1, 49], dtype=torch.float32)
    yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 0}),
                     error_regex='non-empty 4D or 5D')


def sample_inputs_to(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs covering the `to.device`, `to.dtype` and `to.other` overloads."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # test_multiple_devices_to_cuda would fail if we use a different device than given
    devices = [device]
    if torch.device(device).type == 'cpu':
        devices = [torch.device('cpu'), torch.device('cuda:0')] if torch.cuda.is_available() else devices
    memory_formats = [torch.preserve_format, torch.channels_last]

    # TODO: can't switch `to.device` overload to use positional arguments
    # https://github.com/pytorch/pytorch/issues/84265
    # to.device overload
    for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats):
        kwargs = {
            "memory_format": mem_f,
        }
        yield SampleInput(make_arg((S, S, S, S)), args=(device, torch.float64, nb, cp), kwargs=kwargs)

    # to.dtype overload
    for nb, cp, mem_f in product([True, False], [True, False], memory_formats):
        kwargs = {
            "memory_format": mem_f,
        }
        yield SampleInput(make_arg((S, S, S, S)), args=(torch.float64, nb, cp), kwargs=kwargs)

    # to.other overload
    for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats):
        kwargs = {
            "memory_format": mem_f,
        }
        other = make_arg((S, S, S, S), dtype=torch.float64, device=device)
        yield SampleInput(make_arg((S, S, S, S)), args=(other, nb, cp), kwargs=kwargs)


def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for topk: 3-D inputs and 0-d scalars, with all positional arg combos."""
    def get_tensor_input(size):
        return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad)

    yield SampleInput(get_tensor_input((S, M, S)), 3)
    yield SampleInput(get_tensor_input((S, M, S)), 3, 1)
    yield SampleInput(get_tensor_input((S, M, S)), 3, -2)
    yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True)
    yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True)
    yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True, True)
    yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True, True)

    yield SampleInput(get_tensor_input(()), 1)
    yield SampleInput(get_tensor_input(()), 1, 0)
    yield SampleInput(get_tensor_input(()), 1, -1)
    yield SampleInput(get_tensor_input(()), 1, 0, True)
    yield SampleInput(get_tensor_input(()), 1, -1, True)
    yield SampleInput(get_tensor_input(()), 1, 0, True, True)
    yield SampleInput(get_tensor_input(()), 1, -1, True, True)
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
    """Yield a single SampleInput for torch.outer: a 1-D vector pair of different lengths."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(make_arg(S), make_arg(M))

def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.dist over broadcastable shape pairs and p-norms 2 and 4."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
    ps = (2, 4)

    for size_x, size_y, p in product(sizes, sizes, ps):
        yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index(op_info, device, dtype, requires_grad, reference=False, **kwargs):
    """Yield SampleInputs shared by index_select / index_add / index_copy / index_fill.

    The op variant is detected from ``op_info.name``; ``reference=True`` switches to
    all-ones inputs with all-zero indices so that several writes hit one location.
    """
    # target.index_select(dim, idx)
    select = "index_select" in op_info.name
    # target.index_add(dim, idx, source, *, alpha=1)
    add = "index_add" in op_info.name
    # target.index_copy(dim, idx, source)
    copy = "index_copy" in op_info.name
    # target.index_fill(dim, idx, value)
    fill = "index_fill" in op_info.name

    # Extended reference inputs. We generate inputs that exercise atomic adds / writing
    # several times to one location.
    if reference:
        make_arg = partial(torch.ones, device=device, dtype=dtype, requires_grad=requires_grad)
        make_idx = partial(torch.zeros, device=device, dtype=torch.int64)
    else:
        make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
        # Indices need to be different for copy and add to be deterministic.
        if copy or add:
            make_idx = partial(torch.randperm, device=device, dtype=torch.int64)
        else:
            def make_idx(n):
                return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)

    shapes = [(), (1,), (S, S)]
    # extra parameter for add
    if add:
        if dtype == torch.bool:
            alphas = (True, False)
        else:
            alphas = (-1, 0, 2)
    else:
        alphas = (None,)

    if fill:
        # A weird number to catch errors.
        # The former one tests `index_fill.int_Scalar`, and the latter one tests `index_fill.int_Tensor`.
        values = (make_arg((1,)).item(), make_arg(()))
    else:
        values = (None,)

    for shape, alpha, value in product(shapes, alphas, values):
        t = make_arg(shape)
        args = []

        # dim. We handle the scalar case
        dim = -1 if t.ndim == 2 else 0
        args.append(dim)

        idx = make_idx(t.shape[dim] if t.ndim != 0 else 1)
        args.append(idx)

        # source
        if copy or add:
            args.append(make_arg(shape))
        elif fill:
            args.append(value)

        args = tuple(args)
        kwargs = {} if alpha is None else {"alpha": alpha}

        yield SampleInput(t, args=args, kwargs=kwargs)

def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for index_reduce; the reduction kind comes from variant_test_name."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_idx(n, m):
        # n indices, each a valid position in a dimension of size m.
        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m)

    shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
    include_selfs = (True, False)
    reduce = op_info.variant_test_name
    assert reduce in ('prod', 'mean', 'amin', 'amax')

    for shape, include_self in product(shapes, include_selfs):
        self_shape, src_shape = shape
        # dim. We handle the scalar case
        dim = 1 if len(self_shape) >= 2 else 0
        idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1,
                       self_shape[dim] if len(self_shape) != 0 else 1)
        args = (dim, idx, make_arg(src_shape), reduce)
        yield SampleInput(make_arg(self_shape),
                          args=args,
                          kwargs={'include_self' : include_self})

    # Sample inputs to test edge cases for backward
    if requires_grad and reduce == 'prod':
        # Check that gradients are propagated correctly for prod when zeros in self/src are reduced
        # This sample tests gradients for the following cases
        # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1])
        # (c) no zeros reduced (self[2, 1], self[2, 2])
        # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
        # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
        input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
        src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad)
        idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device)

        yield SampleInput(input,
                          args=(0, idx, src, reduce),
                          kwargs={'include_self': True})
def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):
    """Return a generator of SampleInputs for torch.mode, including a large 1-D case.

    Each entry pairs an input shape with positional args (dim, keepdim).
    """
    args = (
        ((S, S, S), (),),
        ((S, S, S), (1, ),),
        ((S, S, S), (1, True, ),),
        ((), (),),
        ((), (0,),),
        ((), (0, True,),),
        # Non-fused mode kernel on CUDA
        ((3000,), ()),
    )
    make_arg = partial(make_tensor, dtype=dtype, device=device,
                       requires_grad=requires_grad, low=None, high=None)
    return (SampleInput(make_arg(input_tensor), *args)
            for input_tensor, args in args)

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.put: generic, scalar and empty targets, accumulate on/off."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs
    idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
    idx_list = [idx, -idx - 1]
    for idx, acc in product(idx_list, (True, False)):
        yield SampleInput(input=make_arg((S, S)),
                          args=(idx.clone(),
                                make_arg((S,)),
                                acc))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    tgt_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
    src_gen = (make_arg(size) for size in scalar_sizes)
    for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),
                                src.clone().requires_grad_(requires_grad),
                                acc))

    # Empty cases
    tgt_sizes = [(0,), (), (1,), (3, 2)]
    tgt_gen = (make_arg(size) for size in tgt_sizes)
    idx = make_idx((0,), high=1)
    src = make_arg((0,))
    for tgt, acc in product(tgt_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),
                                src.clone().requires_grad_(requires_grad),
                                acc))

def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.take: generic, scalar and empty sources."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs: take S elements out of S * S
    index = make_idx((S,), high=(S * S))
    for idx in (index, -index - 1):
        yield SampleInput(input=make_arg((S, S)), args=(idx,))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    src_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
    for src, idx in product(src_gen, idx_gen):
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))

    # Empty cases
    src_sizes = [(0,), (), (1,), (3, 2)]
    src_gen = (make_arg(size) for size in src_sizes)

    idx = make_idx((0,), high=1)
    for src in src_gen:
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))

def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):
    """Yield basic SampleInputs for movedim/moveaxis with full positive/negative permutations."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    yield SampleInput(make_arg((4, 3, 2, 1)), [0, 1, 2, 3], [3, 2, 1, 0])
    yield SampleInput(make_arg((4, 3, 2, 1)), [0, -1, -2, -3], [-3, -2, -1, -0])

def reference_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):
    """Extend the movedim samples with int dims, swaps, idempotent and reversing permutations."""
    yield from sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # shape, source, destination
    args = (
        # empty inputs
        ((), (), ()),
        # int inputs, negative
        ((3, 5, 7, 2), -2, 1),
        # swap bounds
        ((3, 5, 7, 2), (-1, 0), (0, -1)),
        # non-sequential, negative
        ((2, 3, 4, 5, 6), (3, -3, 4), (1, 0, -1)),
        # idempotence, negative
        ((2, 3, 4, 5, 6), (-3, 4, 3, 1), (-3, 4, 3, 1)),
        # reverse, sequential, positive
        ((6, 2, 3, 5, 4), (4, 3, 2, 1, 0), (0, 1, 2, 3, 4)),
        # reverse, non-sequential
        ((6, 2, 3, 5, 4), (-3, -2, -4, -5, -1), (2, 1, 3, 4, 0)),
        # reverse, sequential, negative
        ((6, 2, 3, 5, 4), (4, -2, 2, -4, -5), (-5, 1, 2, -2, -1)),
    )

    for shape, source, destination in args:
        yield SampleInput(make_arg(shape), args=(source, destination))

def error_movedim_moveaxis(op_info, device, **kwargs):
    """Yield ErrorInputs for movedim: length mismatches, repeated dims, out-of-range dims."""
    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # source length < destination length
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3), (1, 0, -1))),
        error_regex=(r"movedim: Invalid source or destination dims: source "
                     r"\(\[3, -3\] dims\) should contain the same number of "
                     r"dims as destination \(\[1, 0, -1\] dims\)"),
    )

    # source length > destination length
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3, 4), (1, 0))),
        error_regex=(r"movedim: Invalid source or destination dims: source "
                     r"\(\[3, -3, 4\] dims\) should contain the same number of "
                     r"dims as destination \(\[1, 0\] dims\)"),
    )

    # repeated source dim, with negative indices
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 4, -5), (1, 0, 2))),
        error_regex=r"movedim: repeated dim in `source` \(\[0, 4, -5\]\)",
    )

    # repeated destination dim, with negative indices
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, 2), (0, 4, -5))),
        error_regex=r"movedim: repeated dim in `destination` \(\[0, 4, -5\]\)",
    )

    # repeated dim (both), with negative indices
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, -4), (0, 4, -5))),
        error_regex=r"movedim: repeated dim in `source` \(\[1, 0, -4\]\)",
    )

    # out of bounds source inputs, with negative indices
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 1, -6), (1, 4, 2))),
        error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)",
        error_type=IndexError,
    )

    # out of bounds destination inputs, with negative indices
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 4, 2), (0, 1, -6))),
        error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)",
        error_type=IndexError,
    )

    # out of bounds source input, int
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=(-6, 1)),
        error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)",
        error_type=IndexError,
    )

    # out of bounds destination input, int
    yield ErrorInput(
        SampleInput(make_arg(2, 3, 4, 5, 6), args=(3, -6)),
        error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)",
        error_type=IndexError,
    )
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs shared by repeat and tile over (repeat_dims, shape) combinations."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
    shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))

    if requires_grad:
        # Tests for variant_consistency_jit, grad, gradgrad
        # are slower. Use smaller bags of `rep_dims` and `shapes`
        # in this case.
        rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1))  # type: ignore[assignment]
        shapes = ((), (0,), (2,), (3, 2))  # type: ignore[assignment]

    is_repeat_op = op_info.name in ['repeat', '_refs.repeat']
    for rep_dim, shape in product(rep_dims, shapes):
        # `torch.repeat` errors for `len(rep_dims) < t.dim()`,
        # so we filter such combinations.
        if is_repeat_op and len(rep_dim) < len(shape):
            continue
        yield SampleInput(make_arg(shape), rep_dim)


def sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs):
    """Yield SampleInputs for narrow/narrow_copy: (tensor, dim, start, length) tuples."""
    shapes_and_args = (
        ((S, S, S), 1, 2, 2),
        ((S, S, S), -1, 2, 2),
        ((S, S, S), 1, 0, 0),
        ((S, S, S), -1, 0, 0),
        ((S, S, S), 2, 1, 2),
    )

    for shape, dim, start, length in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        yield SampleInput(tensor, dim, start, length)
        # narrow also accepts the start argument being a Tensor
        if is_narrow:
            yield SampleInput(tensor, dim, torch.tensor(start), length)

def reference_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs):
    """Extend the narrow samples with boundary cases in 1 to 3 dimensions."""
    yield from sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, is_narrow=is_narrow, **kwargs)

    shapes_and_args = (
        # 1-dim
        ((M,), 0, 0, 0),    # 0 elems from the left
        ((M,), -1, -1, 0),  # 0 elems from the right
        ((M,), 0, 5, 3),    # 3 elems from the left
        ((M,), 0, -5, 2),   # 2 elems from the right
        ((M,), -1, 0, M),   # M elems from the left
        ((M,), 0, -M, M),   # M elems from the right

        # 2-dim
        ((M, S), 1, 0, 0),    # dim 1, 0 elems from the left
        ((S, M), -2, -1, 0),  # dim 0, 0 elems from the right
        ((L, S), 1, 2, 3),    # dim 1, 3 elems from the left
        ((L, S), -1, 3, 2),   # dim 1, 2 elems from the left
        ((M, L), 0, 0, M),    # dim 0, M elems from the left
        ((M, L), -1, -L, L),  # dim 1, L elems from the right

        # 3-dim
        ((L, M, S), 2, 0, 0),    # dim 2, 0 elems from the left
        ((M, S, L), -1, -1, 0),  # dim 2, 0 elems from the right
        ((S, L, M), 2, 0, M),    # dim 2, M elems from the left
        ((L, S, M), -1, -M, M),  # dim 2, M elems from the right
        ((S, L, M), 1, 0, 0),    # dim 1, 0 elems from the left
        ((S, L, M), 0, 2, 1),    # dim 0, 1 elem from the left
        ((M, S, M), -1, -5, 4),  # dim 2, 4 elems from the right
    )

    for shape, dim, start, length in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        yield SampleInput(tensor, dim, start, length)
        # narrow also accepts the start argument being a Tensor
        if is_narrow:
            yield SampleInput(tensor, dim, torch.tensor(start), length)

def error_inputs_narrow_narrow_copy(op_info, device, *, is_narrow, is_ref):
    """Yield ErrorInputs for narrow/narrow_copy: bad rank, dim, start and length."""
    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # 0-dim
    yield ErrorInput(SampleInput(make_arg(()), 0, 0, 1),
                     error_type=RuntimeError,
                     error_regex=r"narrow\(\) cannot be applied to a 0-dim tensor\.")

    # out of bounds dim
    if not is_narrow and not is_ref and torch.device(device).type == 'cpu':
        # narrow_copy_dense_cpu_out
        yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0),
                         error_type=RuntimeError,
                         error_regex=r"Expected dim < static_cast\(self_sizes.size\(\)\) to be true, but got false\.")
    else:
        yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0),
                         error_type=IndexError,
                         error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got 3\)")
    # out of bounds dim (negative)
    yield ErrorInput(SampleInput(make_arg((L, S, M)), -4, 0, 0),
                     error_type=IndexError,
                     error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got -4\)")

    # out of bounds start
    yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, M + 1, 0),
                     error_type=IndexError,
                     error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got 11\)")
    # out of bounds start (negative)
    yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, -M - 1, 0),
                     error_type=IndexError,
                     error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got -11\)")

    # out of bounds length
    yield ErrorInput(SampleInput(make_arg((S, L, M)), 2, 0, M + 1),
                     error_type=RuntimeError,
                     error_regex=r"start \(0\) \+ length \(11\) exceeds dimension size \(10\)\.")
    # out of bounds length (negative)
    if not is_narrow and not is_ref and torch.device(device).type == 'cpu':
        # narrow_copy_dense_cpu_out
        yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1),
                         error_type=RuntimeError,
                         error_regex=r"start \(0\) \+ length \(-1\) exceeds dimension size \(10\)\.")
    else:
        yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1),
                         error_type=RuntimeError,
                         error_regex=r"narrow\(\): length must be non-negative\.")

    # Test Tensor overload that was added for XLA. Start must be an 0-dim
    # integral Tensor. narrow_copy doesn't have this overload.
    # https://github.com/pytorch/pytorch/issues/31558
    if is_narrow:
        # *1-dim* integral Tensor
        yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, make_arg(S, dtype=torch.int), 2),
                         error_type=RuntimeError,
                         error_regex=r"start must be an 0-dim integral Tensor\.")

        # 0-dim *bool* Tensor (bools are not allowed)
        yield ErrorInput(SampleInput(make_arg((L, M, S)), -3, make_arg((), dtype=torch.bool), 3),
                         error_type=RuntimeError,
                         error_regex=r"start must be an 0-dim integral Tensor\.")


def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.trapezoid: (y, x) pairs, dim and dx variants."""
    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #{61619}
        # ((6,0), (6,0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None,
                       requires_grad=requires_grad)
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_arg(y_shape)
        if x_shape is not None:
            x_tensor = make_arg(x_shape)
            yield SampleInput(y_tensor, x_tensor, **kwarg)
        else:
            yield SampleInput(y_tensor, **kwarg)

def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.cumulative_trapezoid (same case table as trapezoid)."""

    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #{61619}
        # ((6,0), (6,0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=None, high=None)
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_arg(y_shape)
        if x_shape is not None:
            x_tensor = make_arg(x_shape)
            yield SampleInput(y_tensor, x_tensor, **kwarg)
        else:
            yield SampleInput(y_tensor, **kwarg)
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for unsqueeze over positive/negative axes, incl. 0-d input."""
    shapes_and_axes = [
        ((3, 4, 5), 0),
        ((3, 4, 5), 1),
        ((3, 4, 5), 3),
        ((3, 4, 5), -1),
        ((3, 4, 5), -3),
        ((), 0),
        ((), -1),
        ((1,), 0),
        ((1,), -1),
    ]

    for shape, axis in shapes_and_axes:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        yield SampleInput(tensor, axis)


def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for nn.functional.unfold over kernel/dilation/padding/stride combos."""
    shapes = ((0, 1, 5, 5), (2, 3, 5, 5))
    kernel_sizes = (2, (2, 2), (2, 3))
    dilations = (1, 2, (1, 2))
    paddings = (0, 1, (1, 2))
    strides = (1, 2, (1, 2))

    cases = product(shapes, kernel_sizes, dilations, paddings, strides)
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    for shape, kernel_size, dilation, padding, stride in cases:
        tensor = make_arg(shape)
        yield SampleInput(tensor, kernel_size, dilation, padding, stride)

    # With default args
    yield SampleInput(make_arg((1, 1, 5, 5)), (3, 3))


def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for squeeze: no-dim and single-dim variants, incl. 0-d input."""
    shapes_and_args = (
        ((S, 1, S, 1), ()),
        ((1, 1, 1, 1), ()),
        ((1, 1, 1, 1), (0,)),
        ((S, 1, S, 1), (1,)),
        ((S, 1, S, 1), (-1,)),
        ((S, 1, S, 1), (2,)),
        ((S, 1, S, 1), (-2,)),
        ((), (0, )),
    )

    for shape, args in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)

        yield SampleInput(tensor, args=args)


def sample_inputs_squeeze_multiple(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for the multi-dim squeeze overload (tuple of dims)."""
    shapes_and_args = (
        ((1, 1, 1, 1), ()),
        ((S, 1, S, 1), (1,)),
        ((S, 1, S, 1), (-1,)),
        ((S, 1, S, 1), (1, 3)),
        ((S, 1, S, 1), (1, 2,)),
        ((), (0,)),
    )

    for shape, dims in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)

        yield SampleInput(tensor, dims)


def _squeeze_ref(x, axis=None):
    """NumPy reference for torch.squeeze semantics (tolerates non-singular dims)."""
    # NumPy doesn't allow squeezing scalars
    if x.ndim == 0:
        return x

    if isinstance(axis, Sequence):
        # Numpy doesn't allow specifying non-singular dimensions
        axis = tuple(a for a in axis if x.shape[a] == 1)

    if isinstance(axis, int) and x.shape[axis] != 1:
        return x

    return np.squeeze(x, axis)

def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
    """Yield SampleInputs for nn.functional.pad; the case table depends on `mode`."""
    assert mode in ('constant', 'reflect', 'replicate', 'circular')
    if mode in ['reflect', 'replicate']:
        cases: tuple = (  # ignore
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    elif mode == 'constant':
        cases = (
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((1, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((0, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((0, 3, 3, 3), (1, 2)),
            ((0, 3, 3, 3), (0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((3, 3, 5, 5), (1, 2)),
            ((3, 3, 5, 5), (0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 2)),
            ((1, 3, 3, 3, 3), (0, 1)),
            ((1, 3, 3, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    else:  # mode == 'circular'
        if dtype == torch.bool:
            # test_dtypes fails on ASAN with for the case ab
            # runtime error: load of value 190, which is not a valid value for type 'bool'
            # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
            # Reference Issue: https://github.com/pytorch/pytorch/issues/63034
            cases = (
                ((2, 3, 3), (1, 2)),
                ((1, 3, 3), (1, 2)),
            )
        else:
            cases = (
                ((0, 3, 3), (1, 2)),
                ((0, 3, 3), (0, 1)),
                ((1, 3, 3), (1, 2)),
                ((1, 3, 3), (0, 1)),
                ((0, 3, 3, 3), (0, 2, 0, 1)),
                ((3, 3, 5, 5), (0, 2, 0, 1)),
                ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
                ((1, 3, 4, 4), (-1, 1, -2, 1)),
            )

    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    if mode == 'constant':
        # Default args
        yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))

    if mode in ['reflect', 'replicate', 'circular']:
        for shape, pad in cases:
            yield SampleInput(make_inp(shape), args=(pad, mode))
    else:  # mode == 'constant'
        for pad_value in (1., 2.):
            for shape, pad in cases:
                yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))

def sample_inputs_nn_pad_replicate_negative(op_info, device, dtype, requires_grad, **kwargs):
    """Yield replicate-mode pad samples with negative padding entries."""
    cases: tuple = (
        ((5, 3, 4, 4), (-4, 5, 0, 0)),
        ((6, 2, 4, 4), (0, 0, 2, -4)),
        ((5, 6, 4, 4), (5, -4, -4, 3)),
        ((4, 2, 5, 5), (-2, -1, 4, 6)),
        ((2, 6, 5, 5), (8, -1, -1, -3)),
        ((8, 1, 5, 5), (-2, -1, -1, -3)),
    )
    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for shape, pad in cases:
        yield SampleInput(make_inp(shape), args=(pad, 'replicate'))
def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs):
    """Yield SampleInputs for constant_pad_nd, derived from the nn.pad constant-mode samples."""
    # Inherit sample inputs from nn.pad, but transform them to fit
    # constant_pad_nd's interface
    nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args,
                                      mode='constant', **kwargs)

    # NOTE: primTorch is more strict about the type of the fill value argument
    # So we must cast it to the correct dtype
    from torch._prims_common import dtype_to_type
    scalar_type = dtype_to_type(dtype)

    def drop_mode_argument(input, pad, mode=None, value=None):
        # Strip the `mode` argument; constant_pad_nd takes only (pad, value).
        if value is None:
            return SampleInput(input, args=(pad,))
        else:
            return SampleInput(input, args=(pad, scalar_type(value)))

    for sample in nn_samples:
        yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs)

def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for repeat_interleave: scalar/int/tensor repeats, with and without dim."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    yield SampleInput(make_input(()), repeats=2)
    yield SampleInput(make_input((2, 3, 4)), repeats=2)
    yield SampleInput(make_input((2, 3, 4)), repeats=2, dim=1)
    yield SampleInput(make_input((2, 3, 4)), repeats=torch.arange(3, device=device), dim=1)


def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.stft: 1-D/2-D signals, centering, windows, onesided."""
    def mt(shape, **kwargs):
        return make_tensor(shape, device=device, dtype=dtype,
                           requires_grad=requires_grad, **kwargs)

    yield SampleInput(mt(100), n_fft=10, return_complex=True)
    yield SampleInput(mt(100), n_fft=10, return_complex=False)
    if dtype.is_complex:
        yield SampleInput(mt(100), n_fft=10)

    for center in [False, True]:
        yield SampleInput(mt(10), n_fft=7, center=center, return_complex=True)
        yield SampleInput(mt((10, 100)), n_fft=16, hop_length=4,
                          center=center, return_complex=True)

    # NOTE(review): the samples below reuse `center` after the loop, i.e. its
    # final value (True) — looks unintentional but preserved; confirm upstream.
    window = mt(16, low=.5, high=2.0)
    yield SampleInput(
        mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
    yield SampleInput(
        mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
    if not dtype.is_complex:
        yield SampleInput(
            mt((10, 100)), n_fft=16, window=window, onesided=False,
            return_complex=True)


def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.istft; real dtypes get a trailing (re, im) dim of 2."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def mt(shape, **kwargs):
        real_shape = shape if dtype.is_complex else shape + (2,)
        return make_arg(real_shape, **kwargs)

    yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))
    yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))
    yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))

    for center in [False, True]:
        yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))
        yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))

        window = make_arg(10, low=.5, high=2.0)
        yield SampleInput(mt((10, 10, 6)), kwargs=dict(
            n_fft=10, window=window, center=center, return_complex=dtype.is_complex))
        yield SampleInput(mt((10, 10, 10)), kwargs=dict(
            n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))

        real_window = window if not dtype.is_complex else window.real
        yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))

def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.ormqr from geqrf factorizations of random matrices."""
    # create a helper function wrapping `make_tensor`
    make_input = partial(make_tensor, dtype=dtype, device=device, low=-1, high=1)

    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    tf = [True, False]
    for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
        input = make_input((*batch, m, n))
        reflectors, tau = torch.geqrf(input)
        reflectors.requires_grad_(requires_grad)
        tau.requires_grad_(requires_grad)
        other_matrix_shape = (m, n) if left else (n, m)
        other = make_input((*batch, *other_matrix_shape), requires_grad=requires_grad)
        yield SampleInput(reflectors, tau, other, left=left, transpose=transpose)


def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    """Yield SampleInputs for cholesky_solve, reusing the cholesky_inverse PSD matrices."""
    cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse(
        op_info, device, dtype, requires_grad=False
    )

    for sample in cholesky_inverse_samples:
        psd_matrix = sample.input
        sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None)
        sample.args = (psd_matrix.requires_grad_(requires_grad),)
        yield sample


def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
    """Yield SampleInputs for torch.lu over batched, non-square full-rank matrices."""
    make_arg = partial(make_fullrank_matrices_with_distinct_singular_values,
                       dtype=dtype, device=device, requires_grad=requires_grad)

    # not needed once OpInfo tests support Iterables
    batch_shapes = ((), (3,), (3, 3))
    for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
        shape = batch_shape + (S + size_delta, S)
        input = make_arg(*shape)
        yield SampleInput(input, args=(True, get_infos))


def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
    """Yield SampleInputs for lu_unpack from linalg.lu_factor outputs."""
    def out_fn(output):
        # Only the L and U factors participate in the gradient check.
        return output[1], output[2]

    for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs):
        lu_data, pivots = torch.linalg.lu_factor(lu_sample.input)
        lu_data.requires_grad_(requires_grad)
        yield SampleInput(lu_data, pivots).with_metadata(output_process_fn_grad=out_fn)


def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
    """Yield SampleInputs for torch.roll over shift/dim pairs, empty and scalar tensors."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))

    for arg in args:
        yield SampleInput(make_arg((0, 0, 0)), args=arg)
        yield SampleInput(make_arg((S, S, S)), args=arg)

    # Scalar tensor
    yield SampleInput(make_arg(()), args=(10, ))
# Scalar tensor + yield SampleInput(make_arg(()), args=(10, )) + +def error_inputs_roll(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "`shifts` required" + s1 = SampleInput(make_arg((S,)), ()) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = ("shifts and dimensions must align") + s2 = SampleInput(make_arg((S, S)), (2, 1), 0) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = ("out of range") + s3 = SampleInput(make_arg((S, )), 0, 2) + yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) + + err_msg4 = ("Dimension specified as 0") + s4 = SampleInput(make_arg(()), 0, 0) + yield ErrorInput(s4, error_regex=err_msg4, error_type=IndexError) + +def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) + + yield SampleInput(make_arg((S, S, S))) + for arg in args: + yield SampleInput(make_arg((S, S, S)), args=arg) + + +def error_inputs_rot90(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "expected total rotation dims" + s1 = SampleInput(make_arg((S, S)), dims=(0,)) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = "expected total dims >= 2" + s2 = SampleInput(make_arg((S,))) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = "expected rotation dims to be different" + s3 = SampleInput(make_arg((S, S)), dims=(1, 1)) + yield ErrorInput(s3, error_regex=err_msg3) + + +def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): + tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, + requires_grad=requires_grad) + tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype, + requires_grad=requires_grad) + + yield SampleInput(tensor_nd()) + yield SampleInput(tensor_nd(), dim=1) + 
yield SampleInput(tensor_nd(), dim=1, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=False, keepdim=False) + + yield SampleInput(tensor_nd(), dim=(1,), correction=1.3) + yield SampleInput(tensor_nd(), dim=(1,), correction=S // 2) + yield SampleInput(tensor_nd(), dim=None, correction=0, keepdim=True) + yield SampleInput(tensor_nd(), dim=None, correction=None) + yield SampleInput(tensor_nd(), correction=0, keepdim=True) + yield SampleInput(make_tensor(3, 4, 5, device=device, dtype=dtype, requires_grad=requires_grad), dim=-3) + + +def sample_inputs_std_var_unbiased(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad) + + # Test var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + yield SampleInput(make_arg((S, S)), True) + yield SampleInput(make_arg((S,)), False) + + +def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): + shapes = [(2,), (1, 2), (3, 2), (2, 3)] + for shape in shapes: + yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + + +def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): + return (SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)) + + +def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_correlation_inputs(device, dtype, requires_grad): + yield SampleInput(t) + num_observations = t.numel() if t.ndimension() < 2 else t.size(1) + fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) + aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) + for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): + yield SampleInput(t.clone().requires_grad_(requires_grad), + 
correction=correction, fweights=fw, aweights=aw) + + +def error_inputs_cov(op_info, device, **kwargs): + a = torch.rand(S, device=device) + yield ErrorInput( + SampleInput(torch.rand(S, S, S, device=device)), + error_regex="expected input to have two or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, S, device=device)), + error_regex="expected fweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(S, S, device=device)), + error_regex="expected aweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, device=device)), + error_regex="expected fweights to have integral dtype") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([1, 1], device=device)), + error_regex="expected aweights to have floating point dtype") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([1], device=device)), + error_regex="expected fweights to have the same numel") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(1, device=device)), + error_regex="expected aweights to have the same numel") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([-1, -2, -3, -4 , -5], device=device)), + error_regex="fweights cannot be negative") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([-1., -2., -3., -4., -5.], device=device)), + error_regex="aweights cannot be negative") + + +def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = [((1, 2, 3, 4), (0, 2, 3, 1)), + ((1, 2, 3, 4), (0, -2, -1, 1)), + ((), ()), + ((1, 2, 3, 4), (2, 1, 3, 0))] + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=(args,)) + +def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((), ()), + ((1,), (0,)), + ((2, 2), (1, 0)), + ((2, 2), (0, 1)), + ((2, 0, 1), (0, 2, 1)), + ((3, 4, 2), (2, 1, 0)), + ((3, 4, 2), (1, 0, 2)), + ((3, 4, 2), (0, 1, 2)), + ) + + # Adds tricky permutations and permutations with noncontiguity + for shape, permutation in cases: + for p in itertools.permutations(permutation): + a = make_arg(shape).permute(p) + yield SampleInput(a, args=(permutation,)) + + a = make_arg(shape, noncontiguous=True).permute(p) + yield SampleInput(a, args=(permutation,)) + +def error_inputs_softshrink(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}), + error_regex="lambda must be greater or equal to 0, but found to be -0.5") + +def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for lbda in (0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + # Note that unlike softshrink, lambd is allowed to be negative for hardshrink + for lbda in (-0.5, 0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of min_val and max_val beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for max_val, min_val in ((0.5, -0.5), (0., 0.)): + yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def error_inputs_hardtanh(op_info, device, **kwargs): + # Tests that hardtanh errors out when passed min_val > max_val. + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"min_val": 0.5, "max_val": -0.5}), + error_type=ValueError, error_regex="min_val cannot be greater than max_val") + +def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): + def c(t): + return t.clone().requires_grad_(requires_grad) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg((3,)) + y = make_arg((4,)) + A = make_arg((2, 3,)) + B = make_arg((1, 3,)) + C = make_arg((1, 2, 3,)) + D = make_arg((1, 3, 4,)) + E = make_arg((4, 4,)) + H = make_arg((3, 3,)) + I = make_arg((1, 3, 1,)) + + # Vector operations + yield SampleInput([c(x)], 'i->') # sum + yield SampleInput([c(x), c(y)], 'i,j->ij') # outer + + # Matrix operations + yield SampleInput([c(A)], "ij->i") # col sum + yield SampleInput([c(A), c(B)], "ij,kj->ik") # matmul + yield SampleInput([c(A), c(E)], "ij,Ab->ijAb") # matrix outer product + + # Tensor operations + yield SampleInput([c(C), c(D)], "aij,ajk->aik") # batch matmul + yield SampleInput([c(D), c(E)], "aij,jk->aik") # tensor matrix contraction + yield SampleInput([c(C), c(B)], "ijk,ik->j") # non contiguous + + # Test diagonals + yield SampleInput([c(I)], 'iji->j') # non-contiguous trace + + # Test ellipsis + yield SampleInput([c(H)], "i...->...") + yield SampleInput([c(C), c(x)], '...ik, ...j -> 
ij') + + +def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((S, M, S), (S, 0, M)) + all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) + + for size, dims in product(sizes, all_dims): + yield SampleInput(make_arg(size), kwargs={"dims": dims}) + +def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): + shapes = [ + (S, M, S), + (S, 0, M), + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + return (SampleInput(make_arg(shape, low=None, high=None)) for shape in shapes) + +def error_inputs_fliplr(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), + error_regex="Input must be >= 2-d.") + +def error_inputs_flipud(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), + error_regex="Input must be >= 1-d.") + +def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + make_integral_arg = partial(make_tensor, dtype=torch.int32, device=device, low=None, high=None, requires_grad=False) + shape = (S, M, S) + + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape[1:]), make_arg(shape[1:]))) + yield SampleInput(make_arg(shape), args=(make_arg((S, 1, S)),)) + yield SampleInput(make_arg(shape), args=(None, make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape), None)) + # test type promotion + yield SampleInput(make_arg(shape), args=(make_integral_arg(shape), None)) + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_integral_arg(shape))) + +def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, 
supports_scalars=False, **kwargs): + yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad) + supported_dtypes = op.supported_dtypes(device) + + # broadcasting and oncontiguous cases + cases = ( + ((4, 4), (4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 1, 4)), + ((4, 4, 1), (1, 4, 4), (4, 4)), + ((4, 1), (1, 4, 4), (1, 4)), + ((4, 4), (), (4, 4)), + ((4, 4), (), ()), + ((), (4, 4), (1, 4, 4)), + ) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c))) + yield SampleInput(make_arg(a, noncontiguous=True), + args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1))) + + # scalar cases + if supports_scalars: + cases = [ + ((), 1, 2,), + ((), 1., 2), + ((4, 4), 1., 2,), + ((3, 4), make_scalar_tensor(), make_scalar_tensor()), + ] + + if torch.complex64 in supported_dtypes: + cases.extend([ + ((3, 1, 4), complex(1, 2), 3.), + ]) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(b, c)) + + # type promotion cases + # int x float + if torch.float in supported_dtypes and torch.long in supported_dtypes: + a = make_arg((), dtype=torch.long) + b = make_arg((1, 4), dtype=torch.float) + c = make_arg((3, 4)) + + cases = ( + (a, b, c), + (c, a, b), + ) + + for a, b, c in cases: + yield SampleInput(a, args=(b, c)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) + + a = make_arg((12,)) + a[4] = nan + a[7] = nan + b = make_arg((12,)) + b[1] = nan + b[7] = nan + c = make_arg((12,)) + c[9] = nan + + yield SampleInput(a, args=(b, c)) + + +def _clamp_min_numpy(a, min=None): + return np.maximum(a, min) + + +def _clamp_max_numpy(a, max=None): + return 
np.minimum(a, max) + + +def _clamp_numpy(a, min=None, max=None): + if min is None: + return np.minimum(a, max) + if max is None: + return np.maximum(a, min) + + return np.minimum(max, np.maximum(a, min)) + + +def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_zeros(dim_select): + assert len(dim_select) == 2 + result = make_arg(3 * (S,)) + result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() + result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() + result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() + return result + + for dim in range(3): + yield SampleInput(make_arg((S, S, S)), args=(dim,)) + # Scalar tensors and empty tensor + for size in [(), (1,), (0,)]: + yield SampleInput(make_arg(size), args=(0,)) + + yield SampleInput(prod_zeros([0, 1]), args=(1,)) + yield SampleInput(prod_zeros([0, 2]), args=(1,)) + yield SampleInput(prod_zeros([1, 2]), args=(1,)) + + # test dtype kwarg + yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) + +def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S), ()) + return (SampleInput(make_arg(size)) for size in sizes) + +def error_inputs_complex(op_info, device, is_ref=False, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + + if is_ref: + error_float = "Expected both inputs to be Half, Float or Double tensors but got torch.float32 and torch.int32" + error_dtype = 
"Expected object of scalar type torch.float32 but got scalar type torch.float64 for second argument" + error_out = "Expected out tensor to have dtype torch.complex128 but got torch.complex64 instead" + else: + error_float = "Expected both inputs to be Half, Float or Double tensors but got Float and Int" + error_dtype = "Expected object of scalar type Float but got scalar type Double for second argument" + error_out = "Expected object of scalar type ComplexDouble but got scalar type ComplexFloat for argument 'out'" + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.int)), + error_type=RuntimeError, error_regex=error_float) + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.float64)), + error_type=RuntimeError, error_regex=error_dtype) + + yield ErrorInput(SampleInput(make_arg(M, S, dtype=torch.float64), make_arg(M, S, dtype=torch.float64), + out=make_arg(M, S, dtype=torch.complex64)), + error_type=RuntimeError, error_regex=error_out) + +def sample_inputs_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = (S, S) + yield SampleInput(make_arg(shape), make_arg(shape)) + +def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_single_zero(): + result = make_arg(2 * (S,)) + result[0, 1] = 0 + return result + + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + # only Tensor, ignore other inputs + yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) + yield sample + + # Generates samples with keepdim = True + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + sample.kwargs['keepdim'] = True + yield sample + + 
yield SampleInput(prod_single_zero()) + yield SampleInput(make_arg((3, 3, 3)), args=(1,)) + yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) + + yield SampleInput(make_arg((3, 0)), args=(1,)) + yield SampleInput(make_arg((3, 0)), args=(1,), kwargs={'keepdim': True}) + yield SampleInput(torch.tensor([2., 3, 0, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + + # test zero scalar tensor + zero = make_arg(()) + zero.zero_() + yield SampleInput(zero.clone().requires_grad_(requires_grad)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), + args=(0,), + kwargs={'keepdim': True}) + +def error_inputs_neg(op_info, device, **kwargs): + si = SampleInput(torch.tensor((False, True), device=device)) + msg = ("Negation, the `\\-` operator, on a bool tensor is not supported." + " If you are trying to invert a mask, use the `\\~` or" + " `logical_not\\(\\)` operator instead.") + yield ErrorInput(si, error_regex=msg) + +def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg(M)) + + tensors = ( + make_arg((M, M)), + make_arg((3, 5)), + make_arg((5, 3)), + ) + + args = ((), (2,), (-2,), (1,), (2,)) + + for tensor, arg in product(tensors, args): + yield SampleInput(tensor.clone().requires_grad_(requires_grad), *arg) + +def reference_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_diagonal_diag_embed( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes1d = ((0,), (1,)) + shapes2d = ((L, M),) + shapes3d = ((L, M, S),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 > dim2 is allowed + dict(dim1=1, dim2=0), + # negative dims are allowed + 
dict(dim1=-2, dim2=-1), + # one dim negative and the other nonnegative is allowed + dict(dim1=-1, dim2=0), + # out of bounds offset should return an empty tensor in diagonal and + # offset the diagonal in diag_embed + dict(offset=100), + ) + + kwargs3d = kwargs2d + ( + # make sure we can use non-sequential dims + dict(offset=-1, dim1=0, dim2=2), + ) + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + if 'diagonal' in op_info.name: + # these are error inputs for diagonal + if shape in ((0,), (1,)): + continue + yield SampleInput(input=make_arg(shape), kwargs=kwargs) + + +def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # Shapes for 2D Tensors + shapes_2d = ((M, M), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((M, M, M),) + + args_2d = ((), (2,), (-2,), (1,)) + args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1)) + + for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)): + input_ = make_arg(input_shape) + # We can programmatically figure out the right shape for src: + # It should be the same size as input.diagonal(other_args...) 
+ if not isinstance(arg, tuple): + arg_tuple = (arg,) + else: + arg_tuple = arg + src_shape = input_.diagonal(*arg_tuple).size() + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *arg_tuple)) + + +def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S))).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + yield SampleInput(make_arg((S, S)), 1).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + +def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): + batch_size, num_classes = shape = (2, 3) + reductions = ("mean", "sum", "none") + + input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ + (shape, {}), + ((*shape, 1), {}), + ((*shape, 1, 2), {}), + ((*shape, 1, 2, 3), {}), + *[(shape, dict(reduction=reduction)) for reduction in reductions], + *[ + ( + shape, + dict( + weight=make_tensor((num_classes,), device=device, dtype=dtype), + reduction=reduction, + ), + ) + for reduction in reductions + ], + (shape, dict(ignore_index=1)), + ] + + for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): + input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) + + if probabilities_target: + # ignore_index is not supported for probabilities target + if "ignore_index" in kwargs: + continue + + target = make_tensor( + input_shape, + low=0, + high=1, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + else: + target = make_tensor( + (batch_size, *input_shape[2:]), + low=0, + high=num_classes, + device=device, + dtype=torch.long, + ) + + if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): + # make sure at least one item in target is not ignored + target[0] = random.sample(sorted(set(range(num_classes)) - 
{kwargs["ignore_index"]}), 1)[0] + + yield SampleInput(input, target, **kwargs) + + +def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): + low, high = op_info.domain + + # Note: Operator is very sensitive at points near the + # start and end of domain and leads to NaN for float16 + # if domain_eps is 1e-5. + if dtype.is_floating_point or dtype.is_complex: + domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 + + low = low + domain_eps + high = high - domain_eps + + make_arg = partial(make_tensor, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg((S, S, S)), 0.2) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg(()), 0.2) + +def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # isin has two paths based on the size of elements and test_elements. 
+ # if elements.numel() < 10 * pow(test_elements.numel(), 0.145): + yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) + # else: + yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) + +def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), + broadcasts_input=True) + +def error_inputs_masked_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float) + for mask_dtype in [torch.float, torch.uint8]: + yield ErrorInput(SampleInput(make_arg(1, 3), args=(torch.ones(1, 3, device=device, dtype=mask_dtype), + make_arg(3, 4))), + error_regex=r"masked_scatter_ only supports boolean masks") + +def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) + + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg(())), + broadcasts_input=True) + yield 
SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, 10), + broadcasts_input=True) + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CUDA but `value` is a CPU scalar tensor. + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, torch.randn(()))) + +def error_inputs_masked_fill(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # `value` is not a 0-D tensor. + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))), + error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension") + # downcasting complex value (scalar overload) + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)), + error_regex=r"value cannot be converted to type .* without overflow") + # downcasting complex value (tensor overload) + yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device), + args=(make_arg(()) > 0, torch.tensor(1j, device=device))), + error_regex=r"value cannot be converted to type .* without overflow") + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CPU but `value` is a CUDA scalar tensor. 
+ yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'), + args=(torch.randn(S, S, device='cpu') > 0, + torch.randn((), device='cuda'))), + error_regex=r"to be on same device") + + +def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + + yield SampleInput(make_arg((M, M)), torch.randn(M, M, device=device) > 0) + + yield SampleInput(make_arg((M, M)), torch.randn((M,), device=device) > 0) + yield SampleInput(make_arg((M,)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg((M, 1, M)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg(()), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg((M, M)), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg(()), torch.randn((M, M), device=device) > 0) + +def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S))) + yield SampleInput(make_arg((S, S, S))) + +def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, + high=None, requires_grad=requires_grad) + test_cases = (((L,), (L,)), + ((S, M), (M,)), + ((M,), (M, S)), + ((S, M), (M, S)), + ((S, 0), (0, M)), + ((S, S, M), (M,)), + ((S, S, M), (M, S)), + ((S, S, 0), (0, S)), + ((M,), (S, M, S)), + ((S, M), (S, M, S)), + ((0, 0), (S, 0, 0)), + ((S, S, M, M), (S, S, M, S)), + ((S, S, M, M), (M,)), + ((M,), (S, S, M, S)), + ((S, S, S), (1, S, S)) + ) + for lhs_shape, rhs_shape in test_cases: + lhs = make_arg(lhs_shape) + rhs = make_arg(rhs_shape) + if not is_rmatmul: + yield SampleInput(lhs, rhs) + else: + yield SampleInput(rhs, lhs) + + +def sample_inputs_meshgrid(op_info: 
OpInfo, device: torch.device, dtype: torch.dtype, + requires_grad: bool, + *, variant: str, **kwargs) -> List[SampleInput]: + if variant == 'variadic': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return tensors + elif variant == 'list': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return [tensors] + else: + raise ValueError( + 'Unsupported variant, must be one of {"variadic", "list"}. ' + f'Got "{variant}".') + + SCALAR = torch.Size([]) + VECTOR = torch.Size([3]) + test_cases: List[List[torch.Size]] = [ + [SCALAR], + [VECTOR], + [VECTOR, SCALAR], + [VECTOR, SCALAR, VECTOR], + [VECTOR, SCALAR, VECTOR, SCALAR], + ] + + for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): + args = make_inputs( + [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes]) + yield SampleInput(*args, indexing=indexing) + + +def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + # Since the accepted lower bound for input + # to mvlgamma depends on `p` argument, + # the following function computes the lower bound + # which we pass to `make_tensor`. + def compute_min_val(p): + return (p - 1.) / 2 + + for shape, n in product(tensor_shapes, ns): + min_val = compute_min_val(n) + if not dtype.is_floating_point: + # Round-up minimum value for integral dtypes + min_val += 1 + else: + min_val += 2 * torch.finfo(dtype).eps + yield SampleInput(make_arg(shape, low=min_val), args=(n,)) + + +# Since `mvlgamma` has multiple entries, +# there are multiple common skips for the additional +# entries. Following function is a helper to that end. 
+def skips_mvlgamma(skip_redundant=False): + skips = ( + # outside domain values are hard error for mvlgamma op. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.float16, torch.int8)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=(torch.int8,)), + ) + if skip_redundant: + # Redundant tests + skips = skips + ( # type: ignore[assignment] + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + ) + return skips + + +# To test reference numerics against multiple values of argument `p`, +# we make multiple OpInfo entries with each entry corresponding to different value of p. +# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. 
+def make_mvlgamma_opinfo(variant_test_name, domain, skips, sample_kwargs): + return UnaryUfuncInfo('mvlgamma', + ref=reference_mvlgamma if TEST_SCIPY else None, + aliases=('special.multigammaln',), + variant_test_name=variant_test_name, + domain=domain, + decorators=(precisionOverride({torch.float16: 5e-2}),), + dtypes=all_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_mvlgamma, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=skips, + sample_kwargs=sample_kwargs) + + +def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): + def _make_tensor_helper(shape, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(_make_tensor_helper((S, S, S)), 0) + yield SampleInput(_make_tensor_helper((S, S, S)), 1) + yield SampleInput(_make_tensor_helper(()), 0) + + if supports_dtype_kwargs: + # NOTE: if `dtype` is not same as input, then inplace variants fail with + # `provided dtype must match the dtype of self tensor in cumsum` + yield SampleInput(_make_tensor_helper((S, S, S)), 1, dtype=dtype) + + +def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((), (0, 1, 1)), + ((S, S, S, S), (0, 3, 1)), + ((S, S, S, S), (1, 3, 1)), + ((S, S, S, S), (2, 3, 1)), + ((S, S, S, S), (3, 3, 1)), + ((S, S, S, S), (0, 3, 2)), + ((S, S, S, S), (1, 3, 2)), + ((S, S, S, S), (2, 3, 2)), + ((S, S, S, S), (3, 3, 2)), + ((S, S, S, S), (0, 4, 1)), + ((S, S, S, S), (1, 4, 1)), + ((S, S, S, S), (2, 4, 1)), + ((S, S, S, S), (3, 4, 1)), + ((M,), (0, 3, 1)), + ((M,), (0, 3, 2)), + ((M,), (0, 3, 3)), + ((1000,), (0, 3, 11)), + ((1000,), (0, 2, 27)), + ((10, 10), (0, 1, 2)), + ((10, 10), (1, 2, 3)), + ((10, 10), (1, 2, 2)), + ((S, S, S), (2, 3, 2)), + ) + + for shape, arguments in 
test_cases: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *arguments) + +def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if list_args: + cases = ( + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), 2),), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), -2),) + ) + else: + cases = ( # type: ignore[assignment] + ((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = (((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3), 0]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), 2)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), -2)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): + def apply_grad(t): + if dtype in floating_types_and(torch.float16, torch.bfloat16): + t.requires_grad_(requires_grad) + + def large_1d_unique(dtype, device): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype) + apply_grad(res) + return res + + # Test case for large tensor. 
+ yield SampleInput(large_1d_unique(dtype, device)) + + yield SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + +def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4) + # broadcast rhs with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S,)), make_arg((S, S))) + # broadcast rhs and weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, 1)), make_arg((S,))) + # broadcast lhs + yield SampleInput(make_arg((S,)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # scalar broadcast_lhs + yield SampleInput(make_arg(()), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # tensor broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), make_arg((S, 1))).with_metadata( + broadcasts_input=True) + # no broadcast with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, S)), make_arg((S, S))) + # broadcast lhs with weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor variant + yield SampleInput(make_arg((S, S)), make_arg((S, S, S)), make_arg((S,))).with_metadata( + broadcasts_input=True) + + if dtype.is_complex: + # no broadcast + yield 
SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 1.2 + 0.1j) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 5.4 + 9j) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4j) + yield SampleInput(make_arg(()), make_arg(()), 6.1 + 0.004j) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg(()), 1 + 2j) + +def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): + cases = ( + ((2, 2, 2), (2, 2, 2), (2)), + ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), + ) + for first_shape, second_shape, dims in cases: + yield SampleInput(make_tensor(first_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + make_tensor(second_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + dims=dims) + +def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + test_cases = ( + ((S, S), (M, L)), + ) + + for input_shape, other_shape in test_cases: + input = make_arg(input_shape) + other = make_arg(other_shape) + yield SampleInput(input, other) + +def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(S)) + yield SampleInput(make_arg(), make_arg(S, S)) + +def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, 
dtype=torch.long, device=device) + test_cases = ( + (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), + (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), + (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor(()), (0, zero.clone().detach(), _tensor(()))), + (_tensor(()), (0, zero.clone().detach(), 2.5)), + ) + + for tensor, args in test_cases: + yield SampleInput(tensor, *args) + + if not requires_grad: + yield SampleInput(tensor.clone().detach(), *args, reduce='add') + + if dtype.is_floating_point: + yield SampleInput(tensor.clone().detach(), *args, reduce='multiply') + +def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + yield SampleInput(_tensor((M, S)), 0, _gather((S, S), 1, M), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), -1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 0, _gather((M, S // 2), 1, M), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), 1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), -1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor(()), 0, zero.clone().detach(), _tensor(())) + +def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, 
device=device, requires_grad=requires_grad) + gather = partial(gather_variable, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + ((M, S), 0, gather((S, S), 1, M), (S, S)), + ((M, S), 1, gather((S, S), 0, S), (S, S)), + ((M, S), -1, gather((S, S), 0, S), (S, S)), + ((M, S), 0, gather((M, S // 2), 1, M), (M, S // 2)), + ((M, S), 1, gather((M, S // 2), 0, S), (M, S // 2)), + ((M, S), -1, gather((M, S // 2), 0, S), (M, S // 2)), + ((), 0, zero.clone().detach(), ()), + ) + + reduce = op_info.variant_test_name + for (inp_shape, dim, index, src_shape), include_self in product(test_cases, [False, True, False]): + yield SampleInput(make_arg(inp_shape), + args=(dim, index, make_arg(src_shape), reduce), + kwargs={'include_self': include_self}) + + + # Sample inputs to test edge cases for backward + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + if requires_grad and reduce == 'prod': + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]) + # (c) no zeros reduced (self([2, 1])) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(1, idx, src, reduce), + kwargs={'include_self': True}) + +def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return 
make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + # inp_shape, dim, lengths, unsafe + ((S,), 0, [0, 1, 2, 2], False), + ((S,), 0, [0, 1, 2, 2], True), + ((S,), 0, [2, 0, 3, 0], False), + ((S, S), 0, [0, 1, 2, 2], False), + # test when lengths do not sum to dim size + ((M, S, S), 0, [1, 2, 0, 6, 0], True), + # test for higher dimensions + ((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ) + + reductions = ["max", "mean", "min", "sum", "prod"] + for args, reduce, initial in product(test_cases, reductions, [1, 2]): + inp_shape, dim, lengths, unsafe = args + lengths_t = torch.tensor(lengths, dtype=torch.long, device=device) + sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial} + if mode == 'lengths': + sample_input_kwargs['lengths'] = lengths_t + elif mode == 'offsets': + zeros_shape = list(lengths_t.shape) + zeros_shape[dim] = 1 + offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim) + sample_input_kwargs['offsets'] = offsets_t + else: + raise RuntimeError(f"mode most be one of 'offsets' or 'lengths' got '{mode}'.") + yield SampleInput(_tensor(inp_shape), + args=(reduce,), + kwargs=sample_input_kwargs) + + +def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg((S, S, S), noncontiguous=True)) + +def sample_inputs_unravel_index(op_info, device, dtype, requires_grad, **kwargs): + make_arg = 
partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput( + torch.tensor( + [[3, 8, 13], [0, 5, 10]], + device=device, + dtype=dtype), + (4, 5)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (4, 2**30)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (2**30, 4)) + yield SampleInput( + torch.tensor(2, device=device, dtype=dtype), + (2, 2)) + max_val = 2**(8 * dtype.itemsize - (1 if dtype.is_signed else 0)) - 1 + yield SampleInput( + torch.tensor(max_val - 1, device=device, dtype=dtype), + (1, max_val)) + yield SampleInput( + torch.tensor([22, 41, 37], device=device, dtype=dtype), + (7, 6)) + yield SampleInput( + torch.tensor(min(1621, max_val), device=device, dtype=dtype), + (6, 7, 8, 9)) + yield SampleInput( + torch.tensor([], device=device, dtype=dtype), + (10, 3, 5)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], + device=device, + dtype=dtype), + (5, 8)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + device=device, + dtype=dtype), + (5, 8, 10)) + yield SampleInput( + torch.tensor(0, device=device, dtype=dtype), + ()) + + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + _, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + _, i1, i2 = np.intersect1d(a, b, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + +def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs): + 
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + cases = (((M, M), ()), + ((M, M), (2,),), + ((M, S), ()), + ((M, S), (-1,)), + ((M, M), (2,),), + ((S, M, S), ()), + ((S, M, S), (2,)), + ((3, 3, S, S), ()),) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + +def error_inputs_tril_triu(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for input.ndim <= 2 + yield ErrorInput(SampleInput(make_arg((4,))), error_regex="input tensor must have at least 2 dimensions") + +def sample_inputs_trilu_indices(op_info, device, dtype, requires_grad, **kwargs): + # (row, col, offset) + args_list = ((0, 0), + (20, 0), + (0, 20), + (20, 21, 0), + (20, 21, 7), + (20, 21, -7), + # Large test cases below are deliberately commented out to speed up CI + # tests and to avoid OOM error. When modifying implementations of + # tril_indices and triu_indices, please enable these tests and make sure + # they pass. 
+ # (2, 68435455, 3), + # (5000, 5000), + # (5000, 5000, 1234), + # (5000, 5000, -1233), + ) + for args in args_list: + yield SampleInput(args[0], args=args[1:], kwargs={"dtype": dtype, "device": device}) + +def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, M, S))) + yield SampleInput(make_arg(())) + +def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs): + # NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format + # This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous + yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs) + + shapes = ( + (3, 5, 6), + (1, 1, 3, 5, 6), + (1, 1, 3, 5, 6, 1, 1), + (1, 0, 3, 5, 0, 2), + (1, 0, 3, 5, 0, 0, 1, 1, 2), + (), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + + yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) 
+ + for shape, strides, offset in strided_cases: + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset)) + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format}) + + # channels last 2D + yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last}) + a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last}) + + # channels last 3D + yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d}) + a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d}) + + +def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # list of tuples (shape, shape) defining the shapes of the input and output tensors + sample_shapes = [ + ((), ()), + ((S,), (1,)), + ((S, S), (1, 1)), + ((S, S), (1, S)), + ((S, S), (S, S)), + ((S, S, S), (S, 1, S)), + ] + + for input_shape, output_shape in sample_shapes: + yield SampleInput(make_arg(input_shape), args=(output_shape,)) + if output_shape == (): + continue + yield SampleInput(make_arg(input_shape), args=(list(output_shape),)) + yield SampleInput(make_arg(input_shape), args=(*output_shape,)) + + +def error_inputs_sum_to_size(op_info, device, **kwargs): + shape = (M, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M, M)) + yield ErrorInput(si, error_regex=err_msg) + + shape = (M + 1, S, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M + 1, 1)) + yield ErrorInput(si, error_regex=err_msg) + + +def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, 
dtype=dtype, device=device) + cases = (((S, S, S), (S * S, S)), + ((), ()), + ((), (1, 1, 1)), + ) + + for shape, args_or_shape in cases: + # Update `args` based on operator + if op_info.name == 'resize_': + # resize_ takes shape/tuple of ints, + args = (args_or_shape, ) + elif op_info.name == 'resize_as_': + # resize_as_ takes another tensor + args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] + else: + raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") + + yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args) + +def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = ( + # a, b, is_tensor_supported + ((S, S, S), (S * S, S), True), + ((S * S, S), (S, S, S), True), + ((S * S, S), (S, -1, S), False), # neg index + ((S * S * 2, S), (S, -1), False), # neg index + ((S,), (S,), True), + ((), (), False), # empty + ((), (1,), True), + ) + + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) + + cases = ( + # a, b, is_tensor_supported + ((125,), (25, 5), True), + ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1), True), + ((16, 32), (2, 4, 1, 4, 4, 1, 4), True), + ((16, 12), (12, 16), True), + ((1, 16, 12), (12, 16), True), + ((1, 5, 1, 5), (25, 1), True), + ((2, 4, 2), (4, 4), True), + ((1, 4), (1, 1, 2, 1, 2), True), + ((3, 5, 7), (7, 5, 3), True), + ((1,), (), False), # empty + ((5, 0, 2, 3), (5, 0, 2, 3), True), + ((2, 1, 0, 3, 1), (5, 0), True), + ((1,), (), False), # empty + ((4, 5, 6), (4, 5, 6, 
1, 1, 1), True), + ((), (1, 1, 1, 1), False), # empty + ) + + irreversible_cases = ( + ((), (-1,), False), # neg index, empty + ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if kwargs.get("tensor_arg"): + # convert to tensor + yield SampleInput(make_arg(a), args=(make_arg(b, requires_grad=False),)) + yield SampleInput(make_arg(b), args=(make_arg(a, requires_grad=False),)) + else: + yield SampleInput(make_arg(a), args=(b,)) + yield SampleInput(make_arg(b), args=(a,)) + + for a, b, is_tensor_supported in irreversible_cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def error_inputs_view_reshape(op, device, **kwargs): + + cases = ( + # a, b, is_tensor_supported + # Reshape to different numel + ((2,), (), False), # empty + ((1, 3, 0), (), False), # empty + ((4, 3), (4, 2), True), + ((1, 3, 5), (5, 2, 2), True), + # No valid inference + ((1, 3, 5), (5, -1, 2), False), # neg index + # Two inferred shapes + ((1, 3, 5), (5, -1, -1), False), # neg index + ((1), (0, -1), False), # neg index + ((0, 5), (0, -1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if b == (5, -1, -1): + error_regex = "only one dimension can be inferred" + elif a == (0, 5): + error_regex = (r"cannot reshape tensor of 0 elements into shape " + r"\[0, -1\] because the unspecified dimension size " + r"-1 can be any value and is ambiguous") + else: + # to 
avoid having issues with a regex + shape = ', '.join(map(str, b)) + size = a if type(a) is int else functools.reduce(operator.mul, a, 1) + error_regex = rf"shape '\[{shape}\]' is invalid for input of size {size}" + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, + error_regex=error_regex) + + +def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): + input_list = [] + shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + yield SampleInput([make_tensor_partial(shape) for shape in shapes]) + +def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): + cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] + ((S, 2, 1), (S, 3, 1)), + ((S), (S, 5)), ((), (1, S)) + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape1, shape2 in cases: + yield SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]) + +def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + if len(shape) > 1: + yield SampleInput(make_tensor_partial(shape), start_dim=1, end_dim=-1) + +def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) + + # shape x start_dim x end_dim + cases = ( + ((5, 4, 0, 1, 3, 7), 1, 3), + ((5, 4, 0, 1, 3, 7), 4, 5), + ((5, 4, 1, 1, 3, 7), 2, 3), + ((), 0, -1), + ((1,), 0, -1), + ((3, 7, 5), 1, 2), + ((4, 5), 1, 1), + ((1, 5, 
5, 1, 5, 1, 5, 1), 0, 2), + ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), + ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), + ((2, 4, 2), 0, 1), + ((4, 2, 2), 1, 2), + ((0, 3, 4, 5), 1, 3), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape, start, end in cases: + yield SampleInput(make_arg(shape), args=(start, end,)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) + yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) + +def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs): + # in_shape, dim, sizes + args = (((8,), 0, (8,)), + ((8,), 0, (4, 2)), + ((8,), -1, (2, 2, 2)), + ((8,), -1, (-1, 2)), + ((3, 6, 2), 1, (2, 3)), + ((3, 6, 2), -2, (2, 3)), + ((3, 6, 2), -2, (-1, 3)), + ((3, 2, 12), 2, (3, 2, 2)), + ((4, 0), 0, (2, 2)), + ((4, 0), 1, (2, 0, 0, 0)), + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for in_shape, dim, sizes in args: + yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes)) + + +def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (1, 2)), + ((S, S, S), (-1, 2)), + ((S, S, S), (-1, -1)), + ((S, S, S), (1, -1)), + ((S,), (0, 2)) + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (S, S), (1, 2)), + ((S, S, S), (S, S), (-1, 2)), + ((S, S, S), (S, S), (-1, -1)), + ((S, S, S), (S, S), (1, -1)), + ((S,), (), (0, 2)) + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + + +def 
sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), + ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), + ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (1, 0, L, 1)), + ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), + ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (2, 0, L, 1)), + ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), + ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + +def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1, S), (-1, S, -1)), + ((S, 1, S), (-1, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=(args,)) + +def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((), + (2, 3)) + memory_format_options = [None, torch.contiguous_format] + + for shape, memory_format in itertools.product(shapes, memory_format_options): + yield SampleInput(make_arg(shape), + kwargs={'memory_format': memory_format} if memory_format else {}) + yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last}) + +def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device) + + cases = (((S, 1, 1), (S, S, S)), + ((), ()), + ((), (1, 1)), + 
) + + for shape, shape_other in cases: + yield SampleInput(make_arg(shape, requires_grad=requires_grad), + args=(make_arg(shape_other, requires_grad=False),)) + + +def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + def make_bool_mask(shape): + # Make sure atleast one element is nonzero, + # except for empty tensor + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + + if mask_t.numel() == 0: + return mask_t + elif mask_t.numel() == 1: + mask_t.fill_(True) + return mask_t + + if mask_t.sum() == 0: + def random_index(shape): + return tuple(random.randrange(0, max_idx) for max_idx in shape) + + mask_t[random_index(mask_t.shape)] = True + return mask_t + + return mask_t + + cases = (((M, M), (M, M), (M, M), False), + ((M, 1, M), (M, M), (M, M, 1), True), + ((), (), (), False), + ((M, 1, M), (), (M, M, 1), True), + ((), (M, M), (), True), + ((), (2), (1, 1), True), + ) + + for shape, mask_shape, other_shape, broadcasts_input in cases: + yield SampleInput(make_arg(shape), + args=(make_bool_mask(mask_shape), make_arg(other_shape)), + broadcasts_input=broadcasts_input) + +# TODO: add reference inputs for where(condition) signature +def reference_inputs_where(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs) + + make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # noncontiguous + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), noncontiguous=True) + b = make_arg((3, 10, 3)).transpose(0, -1) + + # NOTE that the OpInfo for where takes samples of the form a, cond, b + yield SampleInput(a, args=(c, b)) + + # type promoting + other_dtype = torch.double if dtype is not torch.double else torch.long + c = 
make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), dtype=torch.long) + b = make_arg((10, 1)) + + yield SampleInput(a, args=(c, b)) + + # two python scalars + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((1,)).item() + b = make_arg((1,)).item() + + yield SampleInput(a, args=(c, b)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + if dtype.is_floating_point: + nan = float('nan') + else: + # dtype.is_complex + nan = complex(float('nan'), float('nan')) + c = make_cond((1, 10, 3)) + a = make_arg((10, 3), noncontiguous=True) + a[2, 1] = nan + b = make_arg((1, 3)) + b[0, 2] = nan + + yield SampleInput(a, args=(c, b)) + + # Python scalars type promotion + for scalar in (0, 0.0, 2j, False): + yield SampleInput(scalar, args=(c, b)) + yield SampleInput(a, args=(c, scalar)) + + +def error_inputs_where(op_info, device, **kwargs): + shape = (S,) + err_msg = "Expected all tensors to be on the same device" + for devices in product(('cpu', device), repeat=3): + if len(set(devices)) == 2: + si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), + args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), + make_tensor(shape, device=devices[2], dtype=torch.float32))) + yield ErrorInput(si, error_regex=err_msg) + +def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + for input_t, as_tuple in product(inputs, 
[False, True]): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(as_tuple=as_tuple)) + +def sample_inputs_nonzero_static(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + nonzero_sizes = [0, 1, XS, S, M] + + for input_t, nonzero_size in product(inputs, nonzero_sizes): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(size=nonzero_size)) + +def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ((S, S, S), (S, -1))) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=args) + +def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # shape x chunks x dim + cases = ( + ((13, 9, 11), 17, -1), + ((13, 9, 11), 11, -1), + ((13,), 12, -1), + ((15,), 12, -1), + ((15,), 7, 0), + ((15,), 9, 0), + ((3, 7), 9, 1), + ((3, 7), 9, 0), + ((3, 7), 2, 0), + ((3, 7), 3, 0), + ((3, 7), 1, 0), + ((3, 7), 1, 1), + ((4, 4), 2, 0), + ) + + for shape, chunks, dim in cases: + yield SampleInput(make_arg(shape), args=(chunks, dim)) + +def 
sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + test_cases = [ + ((S, S, S), (2,)), + ((S, S, S), (2, 1,)), + ((S, S, S), (2, -1,)), + ((S, S, S), (2, 1, True,)), + ((S, S, S), (2, -1, True,)), + ((S,), (2, 0,)), + ((S,), (2, 0, True,)), + ((), (1,)), + ((), (1, 0,)), + ((), (1, 0, True)), + ] + + yield from (SampleInput(_tensor(tensor), *args) for tensor, args in test_cases) + +def error_inputs_kthvalue(op_info, device, **kwargs): + # tests overlapping output fails + t = make_tensor(10, dtype=torch.float32, device=device) + indices = torch.empty((), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(t, 5, out=(t, indices)), + error_regex="unsupported operation") + + k_out_of_range_err = "selected number k out of range for dimension" + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3, 0), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.tensor(2, device=device), 3), + error_regex=k_out_of_range_err) + +def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, + train=None, valid_input_dim=None, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if valid_input_dim: + cases = ((S,) * i for i in valid_input_dim) + else: + cases = ((S, S), (S,), ()) + p_vals = [0.0, 0.5, 1.0] + # This is to handle special case for feature_alpha_dropout which has different + # supported dtypes depending on `train` parameter + training_vals = [train] if train is not None else [True, False] + + for case, p, training in product(cases, p_vals, training_vals): + yield SampleInput(make_arg(case), p=p, training=training) + yield SampleInput(make_arg(case)) + +def 
sample_inputs_dropout_backward(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_mask = partial(make_tensor, device=device, dtype=torch.bool, requires_grad=False) + + cases = ((S, S, S, S), (S,), ()) + scale_vals = [0.0, 1.0, 2.0] + + for case, scale in product(cases, scale_vals): + yield SampleInput(make_arg(case), make_mask(case), scale) + +def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high, noncontiguous=False): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, + noncontiguous=noncontiguous) + + def make_per_sample_weight(flag, idx): + # a tensor of float / double weights, or None + # to indicate all weights should be taken to be 1 + if flag: + return make_input(idx.shape) + return None + + offsets = torch.tensor([0, 3], device=device, dtype=torch.long) + for generate_per_sample_weight in (True, False): + for mode in ('sum', 'mean', 'max'): + # per_sample_weights is only supported for mode='sum' (got mode='****') + if generate_per_sample_weight and mode in ('mean', 'max'): + continue + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # bag with zero length + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + 
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), + 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((S, S), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) + + # The gradient vector at `padding_idx` is not updated. + # Negative padding_idx + idx = make_long_input((6,), low=0, high=S) + idx[0] = 4 + idx[4] = 4 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((S, S)), args=(idx,), + kwargs={'padding_idx': -1, 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((3, 3), low=0, high=S) + # Positive padding_idx + idx[0, 0] = 2 + idx[1, 1] = 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((S, S)), args=(idx,), + kwargs={'padding_idx': 2, 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) + offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},) + + if not requires_grad: + # Following inputs return different gradient from the numerical gradient. 
+ # This is expected and relevant tests are present in `test_nn.py`. + + # Due to inplace renorming of weight, the numerical gradient doesn't match the + # analytical gradient. + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'max_norm': 1., 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'max_norm': 1., 'norm_type': 1.0, + 'mode': mode, 'offsets': offsets, + 'per_sample_weights': per_sample_weights},) + + if mode != 'max': + # Scale the gradient based on the inverse frequency of a particular index. + # Note : smax mode does not support sparse weights + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 1 + idx[0, 1] = 1 + weights = make_input((S, S)) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'scale_grad_by_freq': True, 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + # gradcheck not implemented for sparse tensors. 
+ # Note : max mode does not support sparse weights + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((6, ), low=0, high=S) + idx[0] = 1 # freq more than 1 + idx[1] = 1 # freq more than 1 + idx[3] = 0 # padding_idx + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, + 'max_norm': 1., 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + +def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high) + + # 0-D index tensor + idx = make_long_input((), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + if not requires_grad: + # Following inputs return different gradient from the numerical gradient. + # This is expected and relevant tests are present in `test_nn.py`. + + # The gradient vector at `padding_idx` is not updated. 
+ idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 2 + idx[1, 1] = 2 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},) + + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 4 + idx[1, 1] = 4 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},) + + # Due to inplace renorming of weight, the numerical gradient doesn't match the + # analytical gradient. + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},) + + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},) + + # Scale the gradient based on the inverse frequency of a particular index. + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 1 + idx[0, 1] = 1 + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},) + + # gradcheck not implemented for sparse tensors. 
+ idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) + + idx = make_long_input((3, 3), low=0, high=S) + idx[0, 0] = 1 # freq more than 1 + idx[0, 1] = 1 # freq more than 1 + idx[1, 0] = 0 # padding_idx + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, + 'padding_idx': 0, 'max_norm': 1.}) + + +def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) + + shapes = ((), (S,), (L, M, S)) + num_classess = (-1, 10) + + return ( + SampleInput( + make_input( + shape, + low=0, + high=10 if num_classes == -1 else num_classes // 2, + ), + kwargs=dict(num_classes=num_classes), + ) + for shape, num_classes in itertools.product(shapes, num_classess) + ) + + +def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs): + rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Although most losses also support the reduce and size_average combination instead of reduce, the former is + # deprecated since 0.4.1 and thus is not tested + shapes_and_kwargs = ( + ((), None), + ((S,), dict(reduction="mean")), + ((S,), dict(reduction="sum")), + ((S,), dict(reduction="none")), + ((S, S), None), + ((S, S, S), None), + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput(_make_tensor(shape), + args=(_make_tensor(shape, requires_grad=rhs_requires_grad),), + kwargs=kwargs) + +def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # 
you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = ("bilinear", "nearest") + align_cornerss = (False, True) + padding_modes = ("zeros", "border", "reflection") + + for dim in (2, 3): + + modes_ = (*modes, "bicubic") if dim == 2 else modes + + for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, *[S] * dim)), + _make_tensor((batch_size, *[S] * dim, dim)), + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + +def reference_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + + batch_size = 2 + num_channels = 3 + height = 345 + width = 456 + modes = ("bilinear", "nearest", "bicubic") + align_cornerss = (False, True) + padding_modes = ('zeros', 'border', 'reflection') + + # Create an affine transformation matrix + a = torch.deg2rad(torch.tensor(45.0)) + ca, sa = torch.cos(a), torch.sin(a) # rotation angles + s1, s2 = 1.23, 1.34 # scales + + theta = torch.tensor([[ + [ca / s1, sa, 0.0], + [-sa, ca / s2, 0.0], + ]], dtype=dtype, device=device) + theta = theta.expand(batch_size, 2, 3).contiguous() + + x = torch.arange(batch_size * num_channels * height * width, device=device) + x = x.reshape(batch_size, num_channels, height, width).to(torch.uint8) + x = x.to(dtype=dtype) + x.requires_grad_(requires_grad) + + for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + grid = torch.nn.functional.affine_grid( + theta, size=(batch_size, num_channels, height, width), align_corners=align_corners + ) + yield SampleInput( + x, + grid, + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_grid_sampler_2d(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of 
the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = (0, 1, 2) + align_cornerss = (False, True) + padding_modes = (0, 1, 2) + + for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, S, L)), + _make_tensor((batch_size, M + 3, M, 2)), + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_target(shape): + shape = () if len(shape) == 1 else (shape[0], ) + t = torch.randint(0, 2, shape, device=device, dtype=torch.long) + # Label with -1 or 1 + t = t * 2 - 1 + target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad) + return target + + shapes = ((S, S), (S,)) + reductions = ('none', 'mean', 'sum') + for s, r in product(shapes, reductions): + yield SampleInput( + make_input(s), + args=(make_input(s), make_target(s)), + kwargs=dict(reduction=r, margin=random.uniform(-1, 1)) + ) + +def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs): + input_length = 50 + batch = 16 + num_char = 20 + target_length = 30 + + def make_log_probs(s): + t = make_tensor(s, device=device, dtype=dtype) + log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad) + return log_probs + + reductions = ('none', 'mean', 'sum') + zero_inf = (True, False) + lengths_type = (list, torch.Tensor) + for r, z, lt in product(reductions, zero_inf, lengths_type): + log_probs = make_log_probs((input_length, batch, num_char)) + targets = 
torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device) + input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device) + target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device) + + # Dont generate int[] types if reduction = "Mean" since this results in non composite compliant calls + # to ctc_loss.IntList since a tensor needs to be created from the target lengths. + # Creating such a tensor requires the use of pointers to copy data from int[] -> torch.Tensor + # e.g. via std::copy. Similarly symbolic/real tracing with fx will also not work + if lt is list and r in ["none", "sum"]: + input_lengths = input_lengths.tolist() + target_lengths = target_lengths.tolist() + + yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z)) + +def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + shape = (2, 3) + num_classes = shape[1] + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # FIXME: Derivative wrt. 
weight not implemented + make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False) + + def make_target(shape, zeros=False): + s = (shape[0], *shape[2:]) if len(shape) > 1 else () + if zeros: + return torch.zeros(s, device=device, dtype=torch.long) + else: + return make_tensor(s, + low=0, + high=shape[1] if len(shape) > 1 else shape[0], + device=device, + dtype=torch.long) + + + def gen_shape_kwargs(): + # Batched, non-batched and 2d + shapes = (shape, (num_classes,), shape + (2, 2)) + reductions = ('none', 'mean', 'sum') + for reduction, s in product(reductions, shapes): + yield make_input(s), make_target(s), dict(reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction) + t = make_target(s) + ignore = num_classes // 2 + # If "mean", nll returns NaN, so it's not differentiable at those points + if t.eq(ignore).all() and reduction == "mean": + t.fill_(0) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight()) + # Test ignoring all the targets + # If "mean", nll returns NaN, so it's not differentiable at those points + if reduction != "mean": + yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target,), kwargs=kwargs) + + target = torch.tensor([-1, 2], device=device, dtype=torch.long) + yield SampleInput(make_input(shape), args=(target,), kwargs={'ignore_index': -1}) + + +def sample_inputs_binary_cross_entropy_with_logits( + op_info, device, dtype, requires_grad, **kwargs +): + make = partial(make_tensor, device=device, dtype=dtype) + make_prob 
= partial(make, low=0, high=1) + reductions = ("mean", "sum", "none") + + def make_weight_shape_kwargs(): + kwargs = [] + for shape in ((1,), (1, S), (S), (S, S)): + kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions]) + return kwargs + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *make_weight_shape_kwargs(), + *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions], + ] + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + make(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + mask = torch.tensor([[0, 1, 0, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 1, 0], + [1, 0, 1, 1, 0], + [1, 0, 0, 1, 0]], dtype=torch.bool, device=device) + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t) + + yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad)) + yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) + +def _generate_sample_shape_reduction(): + shapes = ((S,), (S, S), (S, S, S)) + reductions = ('none', 'mean', 'sum') + yield from product(shapes, reductions) + +def sample_inputs_gaussian_nll_loss(op_info, 
device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0 + make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape(shape): + yield shape + # Broadcast + yield (*shape[:-1], 1) + yield shape[:-1] + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for t_s, v_s in product(gen_shape(s), gen_shape(s)): + yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + + for input, target, var, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, var, ), kwargs=kwargs) + +def error_inputs_gaussian_nll_loss(op_info, device, **kwargs): + _make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 3), low=0), reduction="abc"), + error_type=ValueError, error_regex="abc is not valid") + + # var is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 2), low=0)), + error_type=ValueError, error_regex="var is of incorrect size") + + # target is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 2), _make((10, 2, 3), low=0)), + error_type=RuntimeError, + error_regex=(r"The size of tensor a \(3\) must match the size of tensor b \(2\) " + r"at non-singleton dimension 2")) + +def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for s, r in _generate_sample_shape_reduction(): + yield _make_tensor(s), _make_tensor(s), dict(reduction=r) + +def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = 1 + target[~mask] = -1 + d['margin'] = random.uniform(-9, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + + # scalar input and target. + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(_make_tensor(()), args=(_make_tensor(()), )) + +def error_inputs_hinge_embedding_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='is not a valid value') + +def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for reduction in ('sum', 'mean', 'none'): + if dtype.is_floating_point: # only supports ints and floats + # NaN propagation + inp = make_input((10, )) + inp[2] = float('nan') + target = make_input((10, )) + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Inf Handling + inp = make_input((10, )) + inp[4] = float('inf') + target = make_input((10, )) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] 
= 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Broadcasting + inp = make_input((5, 5)) + target = make_input((1, 5)) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + +def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + d['delta'] = random.uniform(1e-3, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + +def error_inputs_huber_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + err = 'is not a valid value for reduction' + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex=err) + # delta <= 0 + for delta in (0, -1): + err = 'huber_loss does not support non-positive values for delta.' 
+ yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'delta': delta}), + error_type=RuntimeError, error_regex=err) + +def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for li in (True, False): + for f in (True, False): + i1 = _make_tensor(s) + i2 = _make_tensor(s) + # For Poisson NLL Loss, + # target is assumed to be from + # Poisson Distribution which + # always has positive samples + t1 = _make_tensor(s, low=0) + t2 = _make_tensor(s, low=0) + + if not li: + i1.abs_() + i2.abs_() + t1.abs_() + t2.abs_() + + yield ( + i1, t1, + dict(log_input=li, full=f, reduction=r) + ) + yield ( + i2, t2, + dict(log_input=li, full=f, + eps=random.uniform(1e-8, 1e-3), + reduction=r) + ) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, ), kwargs=kwargs) + + # test INT_TO_FLOAT promotion + if dtype.is_complex: + for d in (torch.bool, torch.int64): + yield SampleInput(_make_tensor(dtype=dtype), args=(_make_tensor(dtype=d),)) + yield SampleInput(_make_tensor(dtype=d), args=(_make_tensor(dtype=dtype),)) + +def error_inputs_poisson_nll_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(5\) must match the ' + r'size of tensor b \(4\) at non-singleton ' + r'dimension 1)')) + +def error_inputs_soft_margin_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, 
dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)')) + +def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): + make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) + + kwargss = ( + *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], + dict(swap=True), + *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], + ) + + for kwargs in kwargss: + input = make() + args = (make(), make()) + if with_distance: + kwargs["distance_function"] = torch.nn.PairwiseDistance() + yield SampleInput(input, args=args, kwargs=kwargs) + +def error_inputs_triplet_margin_loss(op_info, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + samples = ( + # input, args, kwargs, error_type, error_regex + # invalid reduction + (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), + dict(reduction="abc"), + ValueError, "abc is not a valid value for reduction"), + + # invalid margin + (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), + dict(margin=-1.0), + ValueError, "margin must be greater than 0, got -1.0"), + + # shape mismatch + (make_input(3, 5), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(5\) must match the size of tensor b \(4\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 5), make_input(3, 4)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of 
length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 4), make_input(3, 5)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + + # different dimensions + (make_input(3,), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 1D, positive 2D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3,), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 1D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3, 4), make_input(3,)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 2D, " + r"and negative 1D inputs")), + ) + + for input, args, kwargs, error_type, error_regex in samples: + yield ErrorInput(SampleInput(input, args=args, kwargs=kwargs), + error_type=error_type, error_regex=error_regex) + +def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs): + make_mat_e4m3 = partial(make_tensor, device=device, dtype=torch.float8_e4m3fn, requires_grad=requires_grad) + make_mat_e5m2 = partial(make_tensor, device=device, dtype=torch.float8_e5m2, requires_grad=requires_grad) + M, N, K = 15, 32, 16 + samples = [] + # two e4m3 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e4m3 mat2 e5m2 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e5m2((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e5m2 mat2 e4m3 + mat1 = 
make_mat_e5m2((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + + yield from samples + +def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8 + + dim_3_q_shape = (batch, seq_q, head_dim) + dim_3_kv_shape = (batch, seq_kv, head_dim) + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + broadcast_tuple = ((num_heads, seq_q, head_dim), (batch, num_heads, seq_kv, head_dim)) + + qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple] + samples = [] + for qkv_shape, is_causal, dropout_p in product( + qkv_shapes, [True, False], [0.0, 0.5]): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q), + make(shape_kv), + make(shape_kv), + is_causal=is_causal, + dropout_p=dropout_p + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim + 8)), + is_causal=is_causal, + dropout_p=dropout_p + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + attn_mask=make((seq_q, seq_kv)), + is_causal=False, + dropout_p=0.0) + ) + + yield from samples + + +def sample_inputs_efficient_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, 
dim_4_kv_shape)] + samples = [] + mask_types = [1, 2] # UpperLeft, LowerRight + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, mask_type, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], mask_types, scales): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=mask_type, + compute_log_sumexp=requires_grad, + scale=scale, + seqlen_k=None + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim + 8)), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + seqlen_k=None + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + bias=make(batch, num_heads, seq_q, seq_kv), + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + seqlen_k=None + ) + ) + + # jagged (with query/keys offsets) + cu_seqlens_k = torch.arange(-1, 32 * 2 + 1, 2, dtype=torch.int32, device=device) + cu_seqlens_k[-1] = 62 + cu_seqlens_k[0] = 0 + samples.append( + SampleInput( + make((32, 2, 64)).view(-1, 8, 8).unsqueeze(0), + make((64, 64)).view(-1, 8, 8).unsqueeze(0), + make((64, 64)).view(-1, 8, 8).unsqueeze(0), + bias=None, + cu_seqlens_q=torch.arange(0, 32 * 2 + 2, 2, dtype=torch.int32, device=device), + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=2, + max_seqlen_k=2, + 
dropout_p=0.0, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + seqlen_k=None, + ) + ) + + yield from samples + +def sample_inputs_flash_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] + samples = [] + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], scales): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + cum_seq_q=None, + cum_seq_k=None, + max_q=seq_q, + max_k=seq_kv, + dropout_p=dropout_p, + is_causal=is_causal, + return_debug_mask=False, + scale=scale, + )) + + yield from samples + +def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shape = (3,) + batched_shape = (2, *shape) + shapes_and_kwargs = [ + (shape, None), + (batched_shape, None), + (shape, dict(keepdim=True)), + (batched_shape, dict(keepdim=True)), + (shape, dict(p=5.0)), + (shape, dict(p=-1.0)), + (shape, dict(eps=1.0)), + ] + + return ( + SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs + ) + +def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 9, 2, 2)), upscale_factor=upscale_factor) + for upscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), upscale_factor=1) + for shape in [ + (1, 0, 1, 1), + 
(1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 1, 6, 6)), downscale_factor=downscale_factor) + for downscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), downscale_factor=1) + for shape in [ + (1, 0, 1, 1), + (1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_channel_shuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes_groups = [ + ((1, 4, 10, 10), 2), + ((2, 6, 8, 8), 3), + ((2, 8, 5, 5), 4), + ] + + yield from ( + SampleInput(make_arg(shape), args=(groups,)) + for shape, groups in shapes_groups + ) + +def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype) + # Lower bounds must be greater than 'eps' defined in gradcheck.py::gradgradcheck() -> eps + # otherwise perturbation calculation causes Tensor value to become negative triggering + # a device-side hardware assertion + make_prob = partial(make, low=1e-6, high=1) + + reductions = ("mean", "sum", "none") + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], + ] + + if logits: + shapes_and_kwargs.extend( + [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + (make if logits else make_prob)(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_allclose(op_info, device, dtype, 
requires_grad, **kwargs): + sample_shapes = [(), (S), (S, S, S)] + atols = [1e-2, 1e-16] + rtols = [1e-1, 0.5] + eps = 1e-8 + for s, rtol, atol in product(sample_shapes, rtols, atols): + # close sample + t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + close = (t + atol).detach().requires_grad_(requires_grad) + yield SampleInput(t, close, rtol=rtol, atol=atol) + + # random sample + a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(a, b, rtol=rtol, atol=atol) + + +def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + # test COMPLEX_TO_FLOAT promotion + if dtype.is_complex: + make = partial(make_tensor, (), device=device, requires_grad=requires_grad) + yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) + yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) + +def error_inputs_l1_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)') + ) + +def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) + + # This test case always triggers the smooth condition, since absolute 
difference of input and target + # is smaller than beta + yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) + yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) + +def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): + # kl_div works with inputs in [0, 1] (aka the pdf of a probability measure) + # Then log [0, 1] = (-inf, 0], so this is the log space + make_arg = partial(make_tensor, low=0., device=device, dtype=dtype, requires_grad=requires_grad) + + def make_log(shape): + out = torch.nn.functional.log_softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + def make_prob(shape): + out = torch.nn.functional.softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + shapes = ((2,), (2, 3)) + reductions = ("none", "mean", "batchmean", "sum") + for shape, reduction, log_target in product(shapes, reductions, (True, False)): + input = make_log(shape) + target = make_log(shape) if log_target else make_prob(shape) + yield SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) + +def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) + yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) + +def reference_pdist(input, p=2): + pdist = scipy.spatial.distance.pdist + if p == 0: + output = pdist(input, "hamming") * input.shape[1] + elif p == float("inf"): + output = pdist(input, lambda x, y: np.abs(x - y).max()) + else: + output = pdist(input, "minkowski", p=p) + return output.astype(input.dtype) + +def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + + yield SampleInput(make_input(())) + yield SampleInput(make_input((2,))) + yield SampleInput(make_input((2, 2))) + yield SampleInput(make_input((2,)), offset=1) + yield SampleInput(make_input((2,)), offset=-1) + +def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + unpool_name_to_pool_method_dict = { + 'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d, + 'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d, + 'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d + } + + unpool_name_to_dim = { + 'nn.functional.max_unpool1d': 1, + 'nn.functional.max_unpool2d': 2, + 'nn.functional.max_unpool3d': 3 + } + + unpool_to_pool_name_dict = {k: f'nn.functional.{v.__name__}' for k, v in unpool_name_to_pool_method_dict.items()} + + pool_dim = unpool_name_to_dim[op_info.name] + pool_method = unpool_name_to_pool_method_dict[op_info.name] + + pool_op_info = copy.copy(op_info) + pool_op_info.name = unpool_to_pool_name_dict[op_info.name] + + for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs): + # shapes (C, ...) 
do not work as of now, + # see https://github.com/pytorch/pytorch/issues/68337 + # TODO: remove once the issue is resolved + if sample.input.dim() != pool_dim + 2: + continue + + # No dilation > 1 for max_unpool, + # see https://github.com/pytorch/pytorch/issues/68420 + if sample.kwargs['dilation'] != 1: + continue + + # Can't unpool without indices + if sample.kwargs['return_indices']: + pool, indices = pool_method(sample.input, **sample.kwargs) + # arg has to be a leaf + arg = pool.detach().requires_grad_(requires_grad) + sample_kwargs = { + 'kernel_size': sample.kwargs['kernel_size'], + 'stride': sample.kwargs['stride'], + 'padding': sample.kwargs['padding'], + # output_size could be None but we specify it explicitly + # to compensate for the information lose in pool due + # to the floor/ceil operation used to compute the shapes + 'output_size': sample.input.size() + } + + yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs) + +def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs): + for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + indices = sample.args[0] + # The samples for max_unpool are generated with max_pool. + # It could be that a single element from the max_pool's + # input is mapped to several locations in its output. + # This situation leads to failed gradchecks because + # the finite difference algorithm perturbs the elements + # of the output one by one, and not in classes of + # equivalences determined by whether two elements + # in the output are coming from the same location in the + # input (simply put, they have the same corresponding index). + # So, there are two ways to resolve this issue: + # 1. Extract a perturbation for one element and apply it all + # the elements from the same equivalence class, or + # 2. Make sure that the equivalence classes are all singletons, + # i.e. the index tensor has to be comprised of only unique + # indices. 
+ # Here we go with the solution 2, the easiest of all. + if indices.unique().numel() == indices.numel(): + yield sample + +def sample_inputs_multi_head_attention_forward(opinfo, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if requires_grad: + # backward tests would take too long to complete, causing the job timeout. + bsz = 2 + is_batcheds = (True,) + use_separate_proj_weights = (False,) + emb_sizes = (2,) + src_lens = (XS,) + tgt_lens = (XS,) + heads = (2,) + dropouts = (0.5,) + mask_types = ("2d",) + else: + bsz = 2 + is_batcheds = (False, True) + use_separate_proj_weights = (False, True) + emb_sizes = (2, 4) + src_lens = (XS,) + tgt_lens = (XS, S) + heads = (1, 2) + dropouts = (0.0, 0.5) + mask_types = (None, "2d", "3d") + + for is_batched, use_separate_proj_weight, mask_type, emb_size, src_len, tgt_len, num_heads, dropout_p in itertools.product( + is_batcheds, use_separate_proj_weights, mask_types, emb_sizes, src_lens, tgt_lens, heads, dropouts + ): + attn_mask = None + if mask_type == "2d": + attn_mask = make_input(src_len, tgt_len) + elif mask_type == "3d": + attn_mask = make_input((bsz if is_batched else 1) * num_heads, src_len, tgt_len) + + if is_batched: + q = make_input(src_len, bsz, emb_size) + k = make_input(tgt_len, bsz, emb_size) + v = make_input(tgt_len, bsz, emb_size) + else: + q = make_input(src_len, emb_size) + k = make_input(tgt_len, emb_size) + v = make_input(tgt_len, emb_size) + if use_separate_proj_weight: + in_proj_weight = None + q_proj_weight = make_input(emb_size, emb_size) + k_proj_weight = make_input(emb_size, emb_size) + v_proj_weight = make_input(emb_size, emb_size) + else: + in_proj_weight = make_input(emb_size * 3, emb_size) + q_proj_weight = None + k_proj_weight = None + v_proj_weight = None + + bias_k = make_input(emb_size) + bias_v = make_input(emb_size) + in_proj_bias = make_input(emb_size * 3) + out_proj_weight = make_input(emb_size, 
emb_size) + out_proj_bias = make_input(emb_size) + sample_args = ( + k, v, emb_size, num_heads, in_proj_weight, + in_proj_bias, bias_k, bias_v, False, + dropout_p, out_proj_weight, out_proj_bias + ) + sample_kwargs = { + "q_proj_weight" : q_proj_weight, + "k_proj_weight" : k_proj_weight, + "v_proj_weight" : v_proj_weight, + "attn_mask" : attn_mask, + "training" : True if dropout_p > 0.0 else False, + "use_separate_proj_weight" : use_separate_proj_weight + } + + yield SampleInput(q, args=sample_args, kwargs=sample_kwargs) + + +# Includes some values such that N * N won't be a multiple of 4, +# which should ensure we test the vectorized and non-vectorized +# kernel code paths. +NUM_SIZE0_TENSORS = 10000 +foreach_num_tensors = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300] +_foreach_inputs_default_kwargs = {"noncontiguous": False, "same_size": False, "low": None, "high": None} + + +class ForeachRightmostArgType(enum.Enum): + TensorList = enum.auto() + ScalarList = enum.auto() + Scalar = enum.auto() + Tensor = enum.auto() + + +class ForeachSampleInput(SampleInput): + # For TensorList Scalar/Tensor, we compute the reference + # by converting it into TensorList ScalarList/TensorList and + # then converting into multiple Tensor Scalar/Tensor. 
+ # ref_args contains the args converted to TensorList ScalarList/TensorList + ref_args: Any + disable_fastpath: bool + + def __init__(self, *args, disable_fastpath=False, ref_args=None, **kwargs): + super().__init__(*args, **kwargs) + self.ref_args = ref_args or self.args + self.disable_fastpath = disable_fastpath + + +class foreach_inputs_sample_func: + def __init__( + self, + arity: int, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool = False, + ) -> None: + self.arity = arity + self._set_rightmost_arg_types( + rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor, + ) + self._intersperse_empty = (True, False) + + def _set_rightmost_arg_types( + self, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool, + ) -> None: + self._rightmost_arg_types = [ForeachRightmostArgType.TensorList] + if self.arity > 1: + if rightmost_supports_scalar: + self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar) + if rightmost_supports_scalarlist: + self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList) + if rightmost_supports_tensor: + self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor) + + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.Tensor: + return [make_tensor( + (), device=device, dtype=dtype, + noncontiguous=_foreach_inputs_kwargs["noncontiguous"], + requires_grad=_foreach_inputs_kwargs.get("requires_grad", False), + )] + should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16) + + def sample_float(): + s = random.random() + if should_use_simpler_scalars: + return 1.0 if s > 0.5 
else 2.0 + else: + return 1.0 - s + + high = 2 if should_use_simpler_scalars else 9 + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, high) + 1 for _ in range(num_tensors)], + [sample_float() for _ in range(num_tensors)], + [complex(sample_float(), sample_float()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return ( + random.randint(1, high + 1), + sample_float(), + True, + complex(sample_float(), sample_float()), + ) + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + if self.arity == 1: + if "foreach_abs" in opinfo.name and dtype in complex_types(): + return True + # unary + if opinfo.ref in (torch.abs, torch.neg): + return False + if opinfo.ref_inplace in (torch.Tensor.zero_,): + return False + return dtype in integral_types_and(torch.bool) + if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor: + return None + if "foreach_pow" in opinfo.name and dtype in integral_types_and(torch.bool): + return True + if any( + foreach_name in opinfo.name + for foreach_name in ("foreach_clamp_max", "foreach_clamp_min", "foreach_maximum", "foreach_minimum") + ) and dtype in integral_types_and(torch.bool): + return True + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if "foreach_add" in opinfo.name and dtype == torch.bool: + disable_fastpath = True + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.Scalar: + disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if isinstance(rightmost_arg, bool): + 
disable_fastpath |= dtype == torch.bool + if opinfo.ref in (torch.add, torch.mul): + disable_fastpath = False + elif isinstance(rightmost_arg, int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg, float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg, complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}") + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.ScalarList: + disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool) + elmt_t = type(rightmost_arg[0]) + has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg) + if not has_same_type: + return dtype not in complex_types() + if isinstance(rightmost_arg[0], bool): + if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool: + disable_fastpath = False + elif isinstance(rightmost_arg[0], int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg[0], float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg[0], complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalarlist of {rightmost_arg}") + return disable_fastpath + else: + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param: + if dtype in integral_types_and(torch.bool): + kwargs["alpha"] = 3 + elif dtype.is_complex: + kwargs["alpha"] = complex(3, 3) + else: + kwargs["alpha"] = 3.14 + if self.arity > 1: + kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype) + return kwargs + + def 
sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for rightmost_arg_type in self._rightmost_arg_types: + zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs) + zero_size_foreach_inputs_kwargs["zero_size"] = True + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + if self.arity > 1: + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + args.append( + self._sample_rightmost_arg( + opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS, + **zero_size_foreach_inputs_kwargs)[0]) + kwargs = self._sample_kwargs( + opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype) + else: + args = [] + kwargs = {} + if opinfo.ref in (torch.abs, torch.neg): + kwargs["disable_fastpath"] = False + else: + kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + _foreach_inputs_kwargs["zero_size"] = False + + # add empty tensor interspersion to test fully fixing #100701 + for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product( + num_input_tensors, self._rightmost_arg_types, self._intersperse_empty): + if 
intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'): + # generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy + continue + _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors + input = sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + args = [] + if self.arity > 1: + args = [ + sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, rightmost_arg_type, device, dtype, num_tensors, + **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + args.append(rightmost_arg) + kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype) + ref_args = args + if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor): + ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]] + sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs) + yield sample + args.pop() + else: + yield ForeachSampleInput( + input, + *args, + disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype), + ) + + +class foreach_max_sample_func(foreach_inputs_sample_func): + def __init__( + self, + arity: int, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool = False, + ) -> None: + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor) + self._intersperse_empty = (False,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + return [] + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return False + + +class foreach_norm_sample_func(foreach_inputs_sample_func): + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + 
assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for ord in (0, 1, 2, -1, -2, float('inf'), float('-inf')): + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors = kwargs.pop("num_input_tensors", foreach_num_tensors) + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, ord, out_dtype in product( + num_input_tensors, + (0, 1, 2, -1, -2, float('inf'), float('-inf')), + (None,) + (torch.complex128,) if dtype in complex_types() else (torch.float64,), + ): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath, dtype=out_dtype) + + # Also test nan propagation with a single tensor, but skip autograd testing + if not requires_grad: + nan_inputs = [ + [float('nan')], + [float('nan'), 1.0], + [1.0, float('nan')], + [1.0, 2.0, 3.0, float('nan'), float('nan'), 7.0, float('nan'), float('nan'), -1.5, 6.0], + [7.0, 3.0, float('nan'), float('nan'), -1.5, 6.0], + [3.0, float('nan'), float('nan'), -1.5, 6.0], + ] + for input in nan_inputs: + x = torch.tensor(input, device=device) + disable_fastpath = True + if ord in (1, 
2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput([x], ord=ord, disable_fastpath=disable_fastpath) + + +class foreach_lerp_sample_func(foreach_inputs_sample_func): + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, 9) + 1 for _ in range(num_tensors)], + [1.0 - random.random() for _ in range(num_tensors)], + [complex(1.0 - random.random(), 1.0 - random.random()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return [random.random()] + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + +class foreach_pointwise_sample_func(foreach_inputs_sample_func): + + def __init__( + self, + arity: int = 3, + rightmost_supports_scalar: bool = False, + rightmost_supports_scalarlist: bool = False, + ): + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist) + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return dtype in integral_types_and(torch.bool) and opinfo.ref in (torch.addcmul,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + # zero_size tensor + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, 
zero_size=True, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + for _ in range(2) + ] + if "scalars" in kwargs: + del kwargs["scalars"] + kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, rightmost_arg_type in itertools.product(num_input_tensors, self._rightmost_arg_types): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList)) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, rightmost_arg_type, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.append(rightmost_arg) + elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]: + kwargs["scalars"] = rightmost_arg + else: + kwargs["value"] = rightmost_arg + kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)) + assert len(args) == 2, f"{len(args)=}" + sample = ForeachSampleInput(input, *args, **kwargs) + yield sample + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.pop() + 
+ +foreach_unary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + 'exp', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'acos', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'asin', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'atan', + 
sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'cos', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'cosh', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + 
supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log10', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log2', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'tan', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # due to https://github.com/pytorch/pytorch/pull/102427 enabling 
jiterator for complex + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + toleranceOverride( + { + torch.complex64: tol(atol=3e-04, rtol=2e-05) + } + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'tanh', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + toleranceOverride( + {torch.complex64: tol(atol=5e-03, rtol=1e-04)} + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'sin', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + 
dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'sinh', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'neg', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_unary_op_tensors_on_different_devices", + device_type="cuda", + dtypes=(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'sqrt', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + 
DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'ceil', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'erf', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + 
unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'erfc', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + 
"test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'expm1', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'floor', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + 
"TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'log1p', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'round', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + 
"test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'frac', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + 
), + ), + ForeachFuncInfo( + 'reciprocal', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'sigmoid', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + device_type="cuda", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + device_type="cuda", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + device_type="cuda", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + device_type="cuda", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + device_type="cuda", 
+ dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + device_type="cuda", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'trunc', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'abs', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + 
supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()), + ), + ), + ForeachFuncInfo( + 'zero', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_out=False, + ), + ForeachFuncInfo( + 'sign', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + 
unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'lgamma', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), + # DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + # "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", 
+ "test_dispatch_meta_outplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + device_type="cuda", + dtypes=(torch.bfloat16,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types() + (torch.bfloat16,), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), +] + +foreach_binary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + "add", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_alpha_param=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # These tests fail with aten._local_scalar_dense not being implemented. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "sub", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_alpha_param=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "mul", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "div", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + # fails with div_cpu is not implemented with ComplexHalf + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.float16,), device_type='cpu'), + ), + ), + ForeachFuncInfo( + "clamp_min", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + ForeachFuncInfo( + "clamp_max", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", 
"test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + # note(crcrpar): forward ad not implemented. 
+ ForeachFuncInfo( + "minimum", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=False, + supports_forward_ad=False, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + # note(crcrpar): forward ad not implemented. 
+ ForeachFuncInfo( + "maximum", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_forward_ad=False, + supports_inplace_autograd=False, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=complex_types(), + active_if=lambda kwargs: not kwargs.get("noncontiguous", False), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + ForeachFuncInfo( + "pow", + supports_alpha_param=False, + supports_scalar_self_arg=True, + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=(torch.half,), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.half,), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.half,), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.half,), device_type="cpu"), + DecorateInfo(unittest.skip("flaky"), "TestForeach", "test_parity", device_type="cpu", dtypes=(torch.complex64,)), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_with_scalar_self_support", + device_type="cuda", + dtypes=(torch.bool,), + active_if=lambda kwargs: kwargs["is_fastpath"], + ), + ), + backward_requires_result=True, + ), + ForeachFuncInfo( + "copy", + sample_inputs_func=foreach_inputs_sample_func(2, False, False), + supports_out=False, + supports_forward_ad=False, + supports_autograd=False, + supports_inplace_autograd=False, + ) +] + +foreach_pointwise_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "addcmul", + 
sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "addcdiv", + sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + # fails with div_cpu is not implemented with ComplexHalf + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), +] + +foreach_reduce_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "max", + sample_inputs_func=foreach_max_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # no complex support for ordering ops like max + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_foreach_reduce_large_input", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", 
+ dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=(torch.complex128, torch.complex64), + ), + ), + ), + ForeachFuncInfo( + "norm", + sample_inputs_func=foreach_norm_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_foreach_reduce_large_input", + device_type="cuda", + dtypes=integral_types_and(torch.bool), + ), + ), + ), +] + +foreach_other_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "lerp", + sample_inputs_func=foreach_lerp_sample_func(3, True, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + 
DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool)), + ), + ), +] + +def reference_sign(x): + if x.dtype == np.bool_: + # `np.sign` doesn't support `bool`. + # >>> np.sign(True) + # ufunc 'sign' did not contain a loop + # with signature matching types dtype('bool') -> dtype('bool') + return np.sign(x, dtype=np.uint8).astype(np.bool_) + return np.sign(x) + + +def reference_sgn(x): + # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. + # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. + # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) + if x.dtype not in [np.complex64, np.complex128]: + return reference_sign(x) + + out = (x / np.abs(x)) + if out.ndim == 0: + # Handle x == 0 case + if (x == 0): + # Can't assign to np.complex object + # So make a new one. 
+ return np.array(complex(0, 0), dtype=x.dtype) + return out + + # Handle x == 0 case + mask = (x == 0) + out[mask] = complex(0, 0) + return out + + +def reference_sigmoid(x): + # 'scipy.special.expit' not supported for the input types + if x.dtype in [np.complex64, np.complex128]: + return (1 / (1 + np.exp(-x))) + return scipy.special.expit(x) + + +def reference_logsigmoid(x): + return np.where( + x < 0, + x - np.log1p(np.exp(x)), + -np.log1p(np.exp(-x))) + + +def reference_hardsigmoid(x): + intermediate = x / 6 + 0.5 + y = np.clip(intermediate, 0, None) + return np.where(y > 1, 1, y).astype(x.dtype) + + +def reference_lgamma(x): + # scipy.special.gammaln returns `-inf` when input is `-inf`. + # While Pytorch, C and C++, all return `inf` when input is `-inf`. + # Reference: + # https://en.cppreference.com/w/cpp/numeric/math/lgamma + # https://en.cppreference.com/w/c/numeric/math/lgamma + + # To handle the above discrepancy, + # we replace -inf with inf so values + # that were originally -inf map to inf as expected + if x.dtype.kind == 'f': + x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) + + out = scipy.special.gammaln(x) + + if x.dtype == np.float16: + # `scipy.special.gammaln` returns output of float32 when input is float16, + # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, + # Pytorch version outputs `inf` while SciPy returns finite values. 
+ out = out.astype(np.float16) + + return out + + +def reference_mvlgamma(x, d): + if x.dtype == np.float16: + return scipy.special.multigammaln(x, d).astype(np.float16) + + return scipy.special.multigammaln(x, d) + +def reference_softplus(input, beta=1, threshold=20): + non_linear = input * beta <= threshold + output = input.copy() + output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta + return output + +def reference_gelu(X, *, approximate='none'): + def _gelu_ref(X): + return X * stats.norm.cdf(X) + + def _tanh_gelu_ref(X): + M_SQRT_2_PI = math.sqrt(2 / math.pi) + Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) + return 0.5 * X * (1.0 + np.tanh(Z)) + + if approximate == 'tanh': + return _tanh_gelu_ref(X) + else: + return _gelu_ref(X) + + +def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: + if num_classes == -1: + num_classes = int(np.amax(a) + 1) + + idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes + one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) + np.put(one_hot, idcs, 1) + return one_hot.reshape(*a.shape, -1) + + +def reference_mse_loss(input, target, reduction="mean"): + se = (input - target) ** 2 + if reduction == "mean": + return np.mean(se) + elif reduction == "sum": + return np.sum(se) + else: # reduction == "none" + return se + + +def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5): + return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0] + + +def reference_native_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight, bias, eps): + feature_size = np.prod(normalized_shape) + inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + if weight is None and bias is not None: + Y = Y + bias.reshape(-1) + elif weight is not None and 
bias is None: + Y = Y * weight.reshape(-1) + elif weight is not None and bias is not None: + Y = Y * weight.reshape(-1) + bias.reshape(-1) + axis = inp.ndim - len(normalized_shape) + stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape) + return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape) + + +def reference_rms_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, eps=None): + if eps is None: + eps = torch.finfo(numpy_to_torch_dtype(inp.dtype)).eps + feature_size = np.prod(normalized_shape) + inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] + rms = np.sqrt((inp_view**2).mean(axis=-1, keepdims=True) + eps) + Y = inp_view / rms + if weight is not None: + Y = Y * weight.reshape(-1) + return Y.reshape(*inp.shape) + + +def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5): + inp_view = inp + if np.prod(inp.shape) != 0: + inp_view = inp.reshape((inp.shape[0], num_groups, -1)) + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + Y = Y.reshape(inp.shape) + if weight is not None: + # weight is a vector of length equal to the channel + if len(Y.shape) > 2: + weight = np.expand_dims(weight, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y * weight + if bias is not None: + # bias is a vector of length equal to the channel + if len(Y.shape) > 2: + bias = np.expand_dims(bias, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y + bias + return Y + + +# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't +# have an out_int32 arg. 
Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into +# stacked 1D cases +def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None): + side = 'right' if (right or side == 'right') else 'left' + if len(sorted_sequence.shape) == 1 : + ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter) + return ret.astype(np.int32) if out_int32 else ret + elif sorted_sequence.shape[0] == 0: + if sorter is not None: + sorter = sorter.flatten() + ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter) + ret = ret.astype(np.int32) if out_int32 else ret + return ret.reshape(boundary.shape) + else: + # numpy searchsorted only supports 1D inputs so we split up ND inputs + orig_shape = boundary.shape + num_splits = np.prod(sorted_sequence.shape[:-1]) + splits = range(0, num_splits) + sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1) + if sorter is not None: + sorter = sorter.reshape(num_splits, -1) + + split_sequence = [sorted_sequence[i] for i in splits] + split_boundary = [boundary[i] for i in splits] + split_sorter = [sorter[i] if (sorter is not None) else None for i in splits] + + split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort) + for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)] + split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret + return np.stack(split_ret).reshape(orig_shape) + +def loss_reference_reduction_wrapper(fn): + def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): + if size_average is not None or reduce is not None: + raise RuntimeError( + "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" + ) + output = fn(input, target, **other_kwargs) + if reduction == "mean": + return np.mean(output) + elif reduction == 
"sum": + return np.sum(output) + else: # reduction == "none" + return output + + return wrapper + +@loss_reference_reduction_wrapper +def reference_smooth_l1_loss(input, target, beta=1.0): + diff = input - target + abs_diff = np.abs(diff) + above_threshold = abs_diff >= beta + + loss = np.empty_like(input) + loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta + loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) + + return loss + +def reference_std_var(f): + """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" + g = reference_reduction_numpy(f) + + @wraps(g) + def wrapper(x: np.ndarray, *args, **kwargs): + assert not ('unbiased' in kwargs and 'correction' in kwargs) + + if 'unbiased' in kwargs: + kwargs['ddof'] = int(kwargs.pop('unbiased')) + elif 'correction' in kwargs: + kwargs['ddof'] = kwargs.pop('correction') + + return g(x, *args, **kwargs) + + return wrapper + +def generate_std_var_kwargs(t: torch.Tensor, **kwargs): + """Generates unbiased/correction kwargs for std/var operators""" + yield ((), {'unbiased': True}) + yield ((), {'unbiased': False}) + + # Currently, calling std with correction is only enabled when + # both dim and keepdim are provided. + if 'dim' in kwargs and 'keepdim' in kwargs: + yield ((), {'correction': 0}) + yield ((), {'correction': 1}) + + numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() + yield ((), {'correction': numel // 2}) + +def error_inputs_mean(op_info, device, is_ref=False, **kwargs): + if is_ref: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. 
" + r"Got: Long") + yield ErrorInput( + SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []), + error_regex=err_msg1, + ) + + if is_ref: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: Long") + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.float32, device=device), + [], + dtype=torch.int64), + error_regex=err_msg2 + ) + + if is_ref: + err_msg3 = "Expected out tensor to have dtype torch.float64, but got torch.float32 instead" + else: + err_msg3 = "Expected out tensor to have dtype double, but got float instead" + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.int64, device=device), + [], + dtype=torch.float64, + out=make_tensor([], dtype=torch.float32, device=device), + ), + error_regex=err_msg3 + ) + +# numpy implementation of torch.flatten +# unfortunately there's no np.flatten. 
we figure out the desired shape and call np.reshape +def reference_flatten(input, start_dim=0, end_dim=-1): + in_shape = input.shape + in_rank = len(in_shape) + for d in start_dim, end_dim: + if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank): + raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank-1}], but got {d}") + end_dim = end_dim if end_dim >= 0 else in_rank + end_dim + start_dim = start_dim if start_dim >= 0 else in_rank + start_dim + if in_rank == 0: + end_dim = start_dim + if end_dim < start_dim: + raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim") + flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1) + out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:] + return np.reshape(input, out_shape) + +# Operator database (sorted alphabetically) +op_db: List[OpInfo] = [ + UnaryUfuncInfo('abs', + aliases=('absolute', ), + ref=np.abs, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_grad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients', + 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestSparseUnaryUfuncs", + "test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=[torch.int8], 
active_if=TEST_WITH_ASAN), + # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) + # We can break the logic of the loop over all possible types but it is OK. + # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + ), + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True), + # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) + UnaryUfuncInfo('acos', + aliases=('arccos', ), + ref=np.arccos, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-1, + torch.complex64: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS),)), + # NOTE: the derivative for inplace acosh is not implemented + UnaryUfuncInfo('acosh', + aliases=('arccosh', ), + ref=np.arccosh, + domain=(1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + 
supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + # acosh is not defined at x < 1 (real) + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)), + safe_val=2)), + BinaryUfuncInfo('add', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.float16, torch.chalf), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_add_sub, + supports_fwgrad_bwgrad=True, + 
supports_forward_ad=True, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestCommon', + 'test_numpy_refs', + dtypes=(torch.complex128,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('item', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs), + ref=np.ndarray.item, + method_variant=None, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool), + supports_out=False, + supports_autograd=False, + error_inputs_func=error_inputs_item, + sample_inputs_func=sample_inputs_item, + skips=( + # Error testing item function variant + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32, torch.complex64)), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: Composite compliance check failed with the above error. 
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'), + )), + OpInfo('arange', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_out=True, + supports_autograd=False, + is_factory_function=True, + error_inputs_func=error_inputs_arange, + sample_inputs_func=sample_inputs_arange, + skips=( + # https://github.com/pytorch/pytorch/issues/81774 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Lazy tensor failures + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + + # Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608 + # We don't have an op for aten::arange but it isn't a special case. + # Argument types: bool, bool, bool, int, int, Device, boo + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + + # Captured graph does not contain aten::arange (succeeds on complex!) 
+ # g: graph(): + # %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]() + # return (%25) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('cauchy', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.cauchy_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_cauchy, + error_inputs_func=error_inputs_cauchy, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('exponential', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.exponential_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_exponential, + error_inputs_func=error_inputs_exponential, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('geometric', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.geometric_, + dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_geometric, + error_inputs_func=error_inputs_geometric, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('log_normal', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.log_normal_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_log_normal, + error_inputs_func=error_inputs_log_normal, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('normal', + variant_test_name='in_place', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.normal_, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_normal, + error_inputs_func=error_inputs_normal, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + )), + OpInfo('uniform', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs), + method_variant=None, + inplace_variant=torch.Tensor.uniform_, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_autograd=False, + is_factory_function=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_uniform, + error_inputs_func=error_inputs_uniform, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # aten.uniform was not decomposed + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('clamp_max', + ref=_clamp_max_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + 
rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('clamp_min', + ref=_clamp_min_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('mul', + aliases=('multiply',), + dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + error_inputs_sparse_func=error_inputs_sparse_mul, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, 
layout=torch.sparse_bsc)), + BinaryUfuncInfo('sub', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), + aliases=('subtract',), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_add_sub, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + OpInfo('addmm', + # This addmm OpInfo is for when alpha and beta are not both equal to 1. + # alpha=beta=1 is tested in the following opinfo, because that special case will + # trigger addmm being decomposed by a jit pass. 
+ dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_addmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('addmm', + # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. + variant_test_name='decomposed', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], + sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # https://github.com/pytorch/pytorch/issues/71784 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.float16,)), + )), + OpInfo('addmv', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}), + 
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + sample_inputs_func=sample_inputs_addmv), + OpInfo('addbmm', + ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), + np.multiply(np.asarray(alpha, dtype=batch1.dtype), + np.sum(np.matmul(batch1, batch2), axis=0))), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_refs'), + # MPS has slightly worse precision. Is this acceptable? + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + 'TestConsistency', + 'test_output_match', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}), + 'TestCommon', 'test_out'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=6e-3, rtol=6e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # addbmm does not correctly warn when resizing out= inputs + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # https://github.com/pytorch/pytorch/issues/55907 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + ), + 
sample_inputs_func=sample_inputs_addbmm), + OpInfo('baddbmm', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], + torch.complex64, torch.complex128), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view', device_type='cuda'), + ], + sample_inputs_func=sample_inputs_baddbmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('dot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('vdot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_dot_vdot, + 
error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('bmm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", "test_out") + ), + sample_inputs_func=sample_inputs_bmm), + OpInfo('mv', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mv), + OpInfo('addr', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + # Reference: https://github.com/pytorch/pytorch/issues/50747 + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/50747 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), + ), + sample_inputs_func=sample_inputs_addr, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('addcmul', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + OpInfo('addcdiv', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + UnaryUfuncInfo('asin', + aliases=('arcsin', ), + ref=np.arcsin, + domain=(-1, 1), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + # NOTE: derivative for inplace asinh is not implemented + UnaryUfuncInfo('asinh', + aliases=('arcsinh', ), + ref=np.arcsinh, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', 
dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('atan', + aliases=('arctan', ), + ref=np.arctan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + BinaryUfuncInfo('atan2', + aliases=('arctan2',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + )), + UnaryUfuncInfo('atanh', + aliases=('arctanh', ), + ref=np.arctanh, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('allclose', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=np.allclose, + supports_autograd=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_allclose, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_out=False), + OpInfo('broadcast_to', + ref=np.broadcast_to, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_broadcast_to), + OpInfo('broadcast_shapes', + op=torch.broadcast_shapes, + ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, + dtypes=_dispatch_dtypes((torch.float32,)), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + supports_autograd=False, + supports_scripting=False, + sample_inputs_func=sample_inputs_broadcast_shapes, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # skip dtype tests since broadcast_shape is not device dependent. 
+ # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('broadcast_tensors', + ref=np.broadcast_arrays, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_broadcast_tensors, + reference_inputs_func=reference_inputs_broadcast_tensors, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + )), + OpInfo('block_diag', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Default batching rule in core doesn't work for ops with TensorList args + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. 
+ # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_block_diag), + UnaryUfuncInfo('bitwise_not', + ref=np.bitwise_not, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.invert, + supports_autograd=False), + BinaryUfuncInfo('bitwise_left_shift', + op=torch.bitwise_left_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.lshift, + inplace_operator_variant=operator.ilshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('bitwise_right_shift', + op=torch.bitwise_right_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.rshift, + inplace_operator_variant=operator.irshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('combinations', + op=torch.combinations, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + 
sample_inputs_func=sample_inputs_combinations), + OpInfo('cartesian_prod', + op=torch.cartesian_prod, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_cartesian_prod, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, + 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + )), + OpInfo('cdist', + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_cdist), + UnaryUfuncInfo('ceil', + ref=np.ceil, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('cholesky', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), + OpInfo('cholesky_inverse', + dtypes=floating_and_complex_types(), + backward_dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + 
gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_gradgrad=True, + sample_inputs_func=sample_inputs_linalg_cholesky_inverse, + gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), + OpInfo('cholesky_solve', + op=torch.cholesky_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_cholesky_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + OpInfo('chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('unsafe_chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + check_batched_forward_grad=False, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('clone', + ref=np.copy, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format' + # (NumPy reference needs to be extended with memory_format) + 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + ),), + OpInfo('contiguous', + op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_fusible_nodes=['aten::contiguous'], + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('sum_to_size', + op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sum_to_size, + error_inputs_func=error_inputs_sum_to_size, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), + )), + OpInfo('clamp', + aliases=('clip',), + ref=_clamp_numpy, + dtypes=all_types_and(torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_clamp, + reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NNC appear to not handle boolean clamp + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + # MPS does not support float64, while numpy does internal computations in float64. 
+ # See https://github.com/pytorch/pytorch/blob/3c1cf03fde145bdbe1f5ffb81765d076c10b4c04/test/test_ops.py#L260-L264 + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('positive', + ref=np.positive, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + ), + UnaryUfuncInfo('conj', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False), + UnaryUfuncInfo('conj_physical', + decomp_aten_name='_conj_physical', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # RuntimeError: inputSet && outputSet + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), + DecorateInfo(unittest.skip("Skipped! 
conj_physical_ not implemented for sparse"), + 'TestSparseUnaryUfuncs', 'test_inplace'), + )), + OpInfo('resolve_conj', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('resolve_neg', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('view_as_real', + dtypes=complex_types(), + supports_forward_ad=True, + supports_out=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_view_as_real, + test_conjugated_samples=False, + ), + OpInfo('view_as_complex', + dtypes=floating_types_and(torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + test_neg_view=False, + sample_inputs_func=sample_inputs_view_as_complex, + skips=( + # RuntimeError: Tensor must have a last dimension with stride 1 + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), + # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + BinaryUfuncInfo('complex', + dtypes=floating_types_and(torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + error_inputs_func=error_inputs_complex, + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), 
+ DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)), + BinaryUfuncInfo('copysign', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('corrcoef', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_corrcoef, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False), + UnaryUfuncInfo('cos', + ref=np.cos, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + 
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('cosh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + OpInfo('cov', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cov, + error_inputs_func=error_inputs_cov, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Float did not match double + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Jacobian mismatch + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cross', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cross, + supports_fwgrad_bwgrad=True, + supports_out=True, + supports_forward_ad=True), + OpInfo('cumsum', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
skips=( + # cumsum does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + sample_inputs_func=sample_inputs_cumulative_ops), + OpInfo('cumprod', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumprod does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + # gradgradcheck fails in fast_mode=True: #56275 + sample_inputs_func=sample_inputs_cumprod, + gradcheck_fast_mode=False), + OpInfo('cummax', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('cummin', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + UnaryUfuncInfo('deg2rad', + ref=np.radians, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + OpInfo('diff', + op=torch.diff, + # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append + # are set as None when converting to numpy + ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( + np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) + ), + 
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diff, + error_inputs_func=error_inputs_diff, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='no_rounding_mode', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True),), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='trunc_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='floor_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + 
sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('true_divide', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True)), + OpInfo('equal', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=lambda input, other: (input == other).all(), + sample_inputs_func=sample_inputs_equal, + supports_autograd=False, + supports_tracing=False, + skips=( + )), + UnaryUfuncInfo('exp', + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + assert_autodiffed=True, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + OpInfo('expand', + op=lambda self, shape: self.expand(shape), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_expand, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('expand_as', + op=lambda self, other: self.expand_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_expand_as, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), + ), + OpInfo('diag', + ref=np.diag, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_diag, + error_inputs_func=error_inputs_diag), + OpInfo('diag_embed', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal', + aten_backward_name='diagonal_backward', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_scatter), + BinaryUfuncInfo('eq', + ref=np.equal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + always_returns_bool=True, + supports_autograd=False, + sample_inputs_func=sample_inputs_comparison_ops, + skips=( + )), + BinaryUfuncInfo('fmax', + op=torch.fmax, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmin', + op=torch.fmin, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmod', + ref=np.fmod, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + 
gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + BinaryUfuncInfo('remainder', + ref=np.remainder, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + operator_variant=operator.mod, + inplace_operator_variant=operator.imod, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + # Fails on XLA + # False is not true : Tensors failed to compare as equal! 
+ # Attempted to compare equality of tensors with different dtypes + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + )), + UnaryUfuncInfo('frac', + ref=lambda x: np.modf(x)[0], + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + # 76047 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.bfloat16, torch.float32, torch.float64)), + )), + OpInfo('stft', + decorators=[ + skipCPUIfNoFFT, + DecorateInfo(unittest.skip("Skipped! stft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ], + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_stft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + ), + OpInfo('istft', + dtypes=complex_types(), + sample_inputs_func=sample_inputs_istft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + decorators=( + DecorateInfo(unittest.skip("Skipped! 
istft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ), + skips=( + skipCPUIfNoFFT, + # gradcheck fails on ROCm (gh-68429) + # grad is computed improperly (probably for weights tensor) + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + )), + UnaryUfuncInfo('floor', + ref=np.floor, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('flip', + op=torch.flip, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_flip, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('fliplr', + op=torch.fliplr, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_fliplr, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('flipud', + op=torch.flipud, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_flipud, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('sparse.sampled_addmm', + dtypes=floating_and_complex_types(), + supports_autograd=True, + sample_inputs_func=sample_inputs_sparse_sampled_addmm, + decorators=[ + skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3)) + or 
(_get_torch_rocm_version() >= (5, 2))), + "cusparseSDDMM was added in 11.2.1"), + skipCPUIfNoMklSparse, ], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 
'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype. + # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # NotImplementedError: Could not run 'aten::sparse_sampled_addmm' with arguments from the 'SparseCsrMeta' backend. + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + )), + OpInfo('sparse.mm', + dtypes=floating_types_and(torch.bfloat16), + variant_test_name='reduce', + supports_autograd=True, + supports_out=False, + supports_gradgrad=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_sparse_mm_reduce, + decorators=[onlyCPU], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # RuntimeError: Sparse CSR tensors do not have is_contiguou + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'), + # NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCsrMeta' backend + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + )), + UnaryUfuncInfo('i0', + ref=np_unary_ufunc_integer_promotion_wrapper( + scipy.special.i0) if TEST_SCIPY else None, + aliases=('special.i0',), + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_i0_i1, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.int8,)), + )), + BinaryUfuncInfo('floor_divide', + ref=_floor_divide_np, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_autograd=False, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + supports_two_python_scalars=True, + skips=( + # AssertionError: Results of original model and exported/imported version of model differed + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. 
NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + )), + UnaryUfuncInfo('frexp', + op=torch.frexp, + ref=np.frexp, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + # skip testing torch.frexp as it is not supported by ROCm platform yet + decorators=[], + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs, + # while theses tests currently requires output to a single tensor. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + + # skips test_reference_numerics due to error in Windows CI. 
+ # The np.frexp returns exponent as np.intc dtype on Windows platform, + # and np.intc does not have the correspond torch dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('log1p', + ref=np.log1p, + aliases=('special.log1p',), + domain=(-1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + promotes_int_to_float=True), + BinaryUfuncInfo('ge', + ref=np.greater_equal, + aliases=('greater_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('geqrf', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + supports_autograd=False, + skips=( + # FIXME: geqrf can't forward with complex inputs that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + BinaryUfuncInfo('gt', + ref=np.greater, + aliases=('greater',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + UnaryUfuncInfo('imag', + ref=np.imag, + dtypes=complex_types_and(torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo('gradient', + dtypes=floating_and_complex_types_and(torch.int8, torch.int16, + torch.int32, torch.int64, + torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # following tests give a runtime error with undefined value tensor + # see discussion : https://github.com/pytorch/pytorch/issues/56660 + # RuntimeError: + # Arguments for call are not valid. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_gradient, + error_inputs_func=error_inputs_gradient), + OpInfo('isin', + dtypes=all_types(), + dtypesIfCUDA=all_types_and(torch.half), + supports_autograd=False, + sample_inputs_func=sample_inputs_isin), + OpInfo('kthvalue', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kthvalue, + error_inputs_func=error_inputs_kthvalue), + BinaryUfuncInfo('le', + ref=np.less_equal, + aliases=('less_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + UnaryUfuncInfo('log', + ref=np.log, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log10', + ref=np.log10, + domain=(0, None), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log10(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log2', + ref=np.log2, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 
'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + # log2(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + BinaryUfuncInfo('ldexp', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + promotes_int_to_float=True, + supports_out=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: mul(): functions with out=... arguments don't support + # automatic differentiation, but one of the arguments requires grad + # https://github.com/pytorch/pytorch/issues/68966 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.complex64: tol(atol=1e-05, rtol=1e-05) + }), + 'TestCommon', device_type='cpu', + ), + ], ), + BinaryUfuncInfo('logaddexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + OpInfo('logaddexp2', + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_logaddexp), + UnaryUfuncInfo('logical_not', + ref=np.logical_not, + 
decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + skips=( + # The function variant always returns BoolTensor + # while the inplace variant preserves the input dtype. + # >>> t = torch.randn(3) + # >>> torch.logical_not(t) + # tensor([False, False, False]) + # >>> torch.logical_not(t).dtype + # torch.bool + # >>> t.logical_not_().dtype + # torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + )), + BinaryUfuncInfo('lt', + ref=np.less, + aliases=('less',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('lu_unpack', + op=torch.lu_unpack, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + sample_inputs_func=sample_inputs_lu_unpack), + OpInfo('lu', + op=torch.lu, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # we skip jit tests because `lu` is a torch function + # RuntimeError: + # 'Tensor (inferred)' object has no attribute or method 'lu'.: + # File "", line 3 + # def the_method(i0): + # return i0.lu(True, True) + # ~~~~~ <--- HERE + 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('lu_solve', + op=torch.lu_solve, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Tests different backward paths"), + "TestCommon", "test_floating_inputs_are_differentiable"),), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]), + OpInfo('masked_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_masked_fill, + error_inputs_func=error_inputs_masked_fill, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + supports_out=False), + OpInfo('masked_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_scatter, + error_inputs_func=error_inputs_masked_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + skips=( + )), 
+ OpInfo('masked_select', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_select, + error_inputs_func=error_inputs_masked_select, + skips=( + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('matrix_exp', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + aliases=('linalg.matrix_exp',), + sample_inputs_func=sample_inputs_matrix_exp, + # Needs to construct a 2nx2n matrix by copy_ ing into it + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # mexp does not support bf16 and fp16 + DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive', + dtypes=[torch.half], device_type="cpu"), + ), + supports_out=False, + ), + OpInfo('matmul', + aliases=('linalg.matmul',), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False), + decorators=[ + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # ROCm intermittently fails the test with standard atol/rtol + 
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', + active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_out', device_type='cuda', + active_if=TEST_WITH_ROCM), + # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the + # backward on CPU + DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu'), + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-5, rtol=1e-5), + torch.complex64: tol(atol=1e-5, rtol=1e-5), + }), + "TestDecomp", "test_comprehensive", device_type="cuda", + ), + ], + skips=( + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', + device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + OpInfo('max', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + skips=( + ), + supports_forward_ad=True), + OpInfo('max', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('median', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of median do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_median, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('nanmedian', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of nanmedian do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('var_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('var_mean', + variant_test_name='unbiased', + 
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of std_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('meshgrid', + variant_test_name='variadic_tensors', + ref=np.meshgrid, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), + skips=[ + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ], + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('meshgrid', + variant_test_name='list_of_tensors', + # Unlike the variant above, we do not use np.meshgrid as a + # ref since it does not officially support list of numpy + # arrays. + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), + skips=[ + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + ], + assert_autodiffed=True, + supports_out=False, + autodiff_nonfusible_nodes=[], + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('min', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + )), + OpInfo('min', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('quantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + OpInfo('nanquantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + BinaryUfuncInfo( + 'max', + aliases=('maximum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the 
second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'maximum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'min', + aliases=('minimum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo( + 'minimum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + ), + ), + BinaryUfuncInfo('logical_and', + ref=np.logical_and, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + 
supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_or', + ref=np.logical_or, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_xor', + ref=np.logical_xor, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False, + skips=( + )), + BinaryUfuncInfo('bitwise_and', + ref=np.bitwise_and, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.and_, + inplace_operator_variant=operator.iand, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_or', + ref=np.bitwise_or, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.or_, + inplace_operator_variant=operator.ior, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_xor', + ref=np.bitwise_xor, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.xor, + inplace_operator_variant=operator.ixor, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('heaviside', + ref=lambda a, b: ( + # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 + np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype 
== np.int64 else np.heaviside(a, b) + ), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + BinaryUfuncInfo('lcm', + ref=np.lcm, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('gcd', + ref=np.gcd, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)),)), + BinaryUfuncInfo('isclose', + ref=np.isclose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_isclose, + error_inputs_func=error_inputs_isclose, + supports_autograd=False, + supports_out=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_refs', dtypes=(torch.complex128,)), + # RuntimeError: Short did not match Int + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + # `softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + aten_name='softmax', + aten_backward_name='_softmax_backward_data', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + variant_test_name="with_dtype", + aten_name='softmax', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo( + '_softmax_backward_data', + op=torch.ops.aten._softmax_backward_data, + aten_name='_softmax_backward_data', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_softmax_backward_data, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + ), + # `softmin` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('nn.functional.softmin', + aten_name='softmin', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=False, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.softmin', + variant_test_name="with_dtype", + aten_name='softmin', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo( + "nn.functional.cross_entropy", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_cross_entropy, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 1536 + # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked + # 1536 bytes CUDA memory on device 0 + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + DecorateInfo(unittest.skip("FP16 corss_entropy cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + + ) + ), + OpInfo('nn.functional.normalize', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_normalize, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('aminmax', + ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(onlyNativeDeviceTypes,), + supports_autograd=False, + sample_inputs_func=sample_inputs_aminmax, + error_inputs_func=error_inputs_aminmax_amax_amin), + OpInfo('as_strided', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # AssertionError: False is not true : Scalars failed to compare as equal! 
+ DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_variant_consistency_eager'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_complex_half_reference_testing'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), + )), + OpInfo('as_strided', + variant_test_name='partial_views', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_partial_views, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: This operator is not Composite Compliant: the + # storage_offset of the tensor was modified directly without + # going through the PyTorch dispatcher. 
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + + # These fail because the test changes the input's in-memory layout + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', + 'test_make_fx_symbolic_exhaustive_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # Fail but are also flaky + DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'), + DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon', + 'test_non_standard_bool_values'), + # RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a + # storage size of 28 are out of bounds for storage of size 20 + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 
'test_dispatch_symbolic_meta_inplace_all_strides'), + )), + OpInfo('as_strided_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_scatter, + error_inputs_func=error_inputs_as_strided_scatter, + skips=( + DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 + DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 + DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # AssertionError: Tensor-likes are not close! 
(new_empty_strided.default) + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)), + OpInfo('native_layer_norm', + aten_name='native_layer_norm', + ref=reference_native_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + assert_jit_shape_analysis=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_native_layer_norm, + error_inputs_func=error_inputs_native_layer_norm, + skips=( + # IndexError: tuple index out of range + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'), + # Tests fail when weight=None and bias is defined + # https://github.com/pytorch/pytorch/issues/79705 + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + # JIT test also tries to compute double backward, which fails + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + OpInfo('native_batch_norm', + aten_name='native_batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 4], + sample_inputs_func=sample_inputs_native_batch_norm, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + # AssertionError: Booleans mismatch: True is not False + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_native_batch_norm_legit', + aten_name='_native_batch_norm_legit', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 4], + sample_inputs_func=sample_inputs__native_batch_norm_legit, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_batch_norm_with_update', + op=torch.ops.aten._batch_norm_with_update, + aten_name='_batch_norm_with_update', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 4], + sample_inputs_func=sample_inputs__batch_norm_with_update, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + # _batch_norm_with_update expects contiguous inputs for cudnn and miopen + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type="cuda"), + DecorateInfo(unittest.expectedFailure, + 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides', device_type="cuda"), + # _batch_norm_with_update does not have python bindings + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # aten out variants do not accept out= kwarg, only python out variants + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + ) + ), + OpInfo('nn.functional.cosine_similarity', + aten_name="cosine_similarity", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_similarity), + OpInfo('nn.functional.adaptive_avg_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool1d, + 
sample_inputs_func=sample_inputs_adaptive_avg_pool1d), + OpInfo('nn.functional.adaptive_avg_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool2d, + sample_inputs_func=sample_inputs_adaptive_avg_pool2d), + OpInfo('nn.functional.adaptive_avg_pool3d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool3d, + sample_inputs_func=sample_inputs_adaptive_avg_pool3d), + OpInfo('nn.functional.adaptive_max_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool1d, + sample_inputs_func=sample_inputs_adaptive_max_pool1d), + OpInfo('nn.functional.adaptive_max_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. 
: + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool2d, + sample_inputs_func=sample_inputs_adaptive_max_pool2d), + OpInfo('nn.functional.adaptive_max_pool3d', + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool3d, + sample_inputs_func=sample_inputs_adaptive_max_pool3d), + OpInfo('nn.functional.avg_pool1d', + aten_name='avg_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + 
error_inputs_func=error_inputs_avg_pool1d, + sample_inputs_func=sample_inputs_avgpool1d), + OpInfo('nn.functional.avg_pool3d', + aten_name='avg_pool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool3d, + sample_inputs_func=sample_inputs_avgpool3d, + skips=( + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + )), + OpInfo( + "nn.functional.binary_cross_entropy_with_logits", + aten_name="binary_cross_entropy_with_logits", + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + 'TestJit', + 'test_variant_consistency_jit', + dtypes=(torch.float32,) + ), + ), + ), + UnaryUfuncInfo( + 'nn.functional.relu', + aten_name="relu", + ref=lambda a: np.where(a <= 0, 0, a), + supports_autograd=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_activation_relu, + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True), + OpInfo('nn.functional.conv_transpose1d', + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d), + aten_name='conv_transpose1d', + aliases=('conv_transpose1d',), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + 
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.float: tol(atol=1.5e-5, rtol=1.5e-5), }), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64,)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float,)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose2d', + aten_name='conv_transpose2d', + aliases=('conv_transpose2d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose2d, + # Runs very slowly on slow-gradcheck for complex. + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # AssertionError: None mismatch: torch.complex64 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose3d', + aten_name='conv_transpose3d', + aliases=('conv_transpose3d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and( + torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose3d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 
'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }), + 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }), + 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', + active_if=TEST_CUDNN), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}), + "TestMathBits", "test_conj_view", device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-1), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=[torch.complex32], active_if=TEST_WITH_ROCM), + ), + supports_out=False,), + OpInfo('nn.functional.conv1d', + aliases=('conv1d',), + aten_name='conv1d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv1d, + error_inputs_func=error_inputs_conv1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing' + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv2d', + aliases=('conv2d',), + aten_name='conv2d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=partial(sample_inputs_conv2d), + error_inputs_func=error_inputs_conv2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv3d', + aliases=('conv3d',), + aten_name='conv3d', + dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv3d, + error_inputs_func=error_inputs_conv3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + # TF32 + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-3, rtol=1e-3)}), + 'TestCommon', 'test_noncontiguous_samples', + ), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=5e-5, rtol=5e-6)}), + 'TestMathBits', 'test_conj_view', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-6)}), + 'TestOperators', 'test_vjpvmap', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + # AssertionError: Tensor-likes are not close! + # break slow tests + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.group_norm', + aten_name='group_norm', + aliases=('group_norm',), + ref=reference_group_norm, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_group_norm, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. + # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) + ], + sample_inputs_func=sample_inputs_group_norm, + reference_inputs_func=reference_inputs_group_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.instance_norm', + # no ref because instance_norm will often have numerical instability (large numbers or nan) + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + allow_cow_input_materialize_forward=['running_mean', 'running_var'], + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_instance_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.layer_norm', + aten_name='layer_norm', + aten_backward_name='layer_norm_backward', + aliases=('layer_norm',), + ref=reference_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), + 'TestCommon', 'test_numpy_refs' + ), + DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'), + ], + sample_inputs_func=sample_inputs_layer_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.rms_norm', + aten_name='rms_norm', + aliases=('rms_norm',), + ref=reference_rms_norm, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rms_norm, + error_inputs_func=error_inputs_rms_norm,), + OpInfo('nn.functional.local_response_norm', + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_local_response_norm,), + OpInfo('constant_pad_nd', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_constant_pad_nd, + supports_out=False, + skips=( + # bool can't be passed to Scalar arguments in JIT tracer because + # BoolType is not a subtype of ScalarType. + DecorateInfo( + unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('nn.functional.pad', + variant_test_name='constant', + aten_name='constant_pad_nd', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='reflect', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate_negative', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_pad_replicate_negative, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Some negative padding cases cause a segfault on MPS + DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='circular', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Difference from is larger with decomposition new_empty_strided.default than original on output 0 + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), + ), + supports_out=False), + OpInfo('nn.functional.hardswish', + aten_name="hardswish", + aten_backward_name='hardswish_backward', + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardswish, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_nonfusible_nodes=["aten::hardswish"]), + OpInfo('nn.functional.unfold', + aten_name='im2col', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_unfold, + # Runs very slowly on slow gradcheck - alternatively reduce input 
sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # NOTE: this failure may not reproduce consistently on different systems + # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 + DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest-exact', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.uint8), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + # NotImplementedError: The operator 'aten::_upsample_nearest_exact3d.out' is not currently implemented + # for the MPS device. + DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='linear', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bilinear', + supports_fwgrad_bwgrad=True, + supports_autograd=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bicubic', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='trilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='area', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'area'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.upsample_bilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), + reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('_upsample_bilinear2d_aa', + op=torch.ops.aten._upsample_bilinear2d_aa, + aten_name='_upsample_bilinear2d_aa', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestEagerFusionOpInfo', 'test_aot_autograd_symbolic_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo( + "nn.functional.soft_margin_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + # doesn't support grad on target + sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), + error_inputs_func=error_inputs_soft_margin_loss, + ), + OpInfo('nn.functional.upsample_nearest', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo( + "nn.functional.margin_ranking_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_margin_ranking_loss, + error_inputs_func=error_inputs_margin_ranking_loss, + reference_inputs_func=reference_inputs_margin_ranking_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo( + "nn.functional.multi_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multi_margin_loss, + reference_inputs_func=reference_inputs_multi_margin_loss, + error_inputs_func=error_inputs_multi_margin_loss, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + OpInfo( + "nn.functional.multilabel_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multilabel_margin_loss, + reference_inputs_func=reference_inputs_multilabel_margin_loss, + error_inputs_func=error_inputs_multilabel_margin_loss, + ), + OpInfo('nn.functional.leaky_relu', + aliases=None, + aten_name="leaky_relu", + aten_backward_name='leaky_relu_backward', + sample_inputs_func=sample_inputs_leaky_relu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + inplace_variant=lambda x, negative_slope=0.01: + torch.nn.functional.leaky_relu(x, negative_slope, inplace=True), + supports_autograd=True, + assert_autodiffed=True, + supports_gradgrad=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::leaky_relu"]), + OpInfo( + "nn.functional.multilabel_soft_margin_loss", + supports_out=False, + 
dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 + # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 + # leaked 4096 bytes CUDA memory on device 0 + DecorateInfo( + # Skip instead of expectedFailure because this fails + # locally for me but passes in CI. + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo('nn.functional.avg_pool2d', + aten_name='avg_pool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_avg_pool2d, + sample_inputs_func=sample_inputs_avgpool2d, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('nn.functional.fractional_max_pool2d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + sample_inputs_func=sample_inputs_fractional_max_pool2d, + decorators=( + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.fractional_max_pool3d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_fractional_max_pool3d, + decorators=( + # FIXME: both derivatives are implemented incorrectly + # https://github.com/pytorch/pytorch/issues/69322 + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.max_pool1d', + aten_name='max_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + # RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet. 
+ # Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data() + # to actually allocate memory + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + ), + error_inputs_func=error_inputs_max_pool1d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_pool2d', + aten_name='max_pool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + assert_jit_shape_analysis=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_max_pool2d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('max_pool2d_with_indices_backward', + op=max_pool2d_backward, + # We've defined a custom op, so there's no corresponding aten op + aten_name=None, + method_variant=None, + inplace_variant=None, + operator_variant=None, + inplace_operator_variant=None, + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_max_pool, + skips=( + # We've defined a custom op here, and we don't handle the case where we receive an out kwarg + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit') + )), + OpInfo('nn.functional.max_pool3d', + aten_name='max_pool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + # TODO: investigate nondeterminism + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_max_pool3d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_unpool1d', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad', + device_type='cpu'), + )), + OpInfo('nn.functional.max_unpool1d', + variant_test_name='grad', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool2d', + aten_name='max_unpool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool2d', + variant_test_name='grad', + aten_name='max_unpool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_grad=False, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool3d', + aten_name='max_unpool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool3d', + variant_test_name='grad', + aten_name='max_unpool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.linear', + aten_name='linear', + supports_autograd=True, + supports_gradgrad=True, + sample_inputs_func=sample_inputs_linear, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # linear calls mm under the hood which is nondeterministic on CUDA + # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_expanded_weight=True, + decorators=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('nn.functional.bilinear', + aten_name='bilinear', + supports_autograd=True, + sample_inputs_func=sample_inputs_bilinear, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), + decorators=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=5e-05, rtol=1e-03)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.glu', + aten_name='glu', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + sample_inputs_func=sample_inputs_glu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + UnaryUfuncInfo( + 'nn.functional.elu', + aten_backward_name='elu_backward', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.elu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + 
torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + # Marked as a Unary function because it has some rather odd broadcasting semantics in its + # second argument + UnaryUfuncInfo( + 'nn.functional.prelu', + aten_backward_name='_prelu_kernel_backward', + ref=lambda x, weight: + np.maximum(0., x) + np.minimum(0., x) * + (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + # test_reference_numerics only tests the case when the weight tensor is a scalar + sample_kwargs=sample_kwargs_prelu_scalar_weight, + error_inputs_func=error_inputs_prelu, + sample_inputs_func=sample_inputs_prelu, + reference_inputs_func=reference_inputs_prelu, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + # https://github.com/pytorch/pytorch/issues/68752 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], + ), + UnaryUfuncInfo( + 'nn.functional.celu', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.celu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + UnaryUfuncInfo( + 'nn.functional.rrelu', + aten_backward_name='rrelu_with_noise_backward', + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + (dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)), + sample_inputs_func=sample_inputs_rrelu, + error_inputs_func=error_inputs_rrelu, + decorators=( + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ),), + skips=( + # lambda impl + 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # In-place operations do not play well with forward AD + # https://github.com/pytorch/pytorch/issues/77447 + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', + 'test_inplace_forward_mode_AD'), + # The noise vector that's generated in these tests is not the same elementwise + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + UnaryUfuncInfo( + 'nn.functional.selu', + ref=lambda x, inplace=False: + 1.0507009873554804934193349852946 * ( + np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) + ), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, # depends on 'elu' + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + OpInfo( + 'torch._scaled_mm', + sample_inputs_func=sample_inputs_scaled_mm, + dtypes=empty_types(), + dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,), + supports_out=True, + supports_forward_ad=False, + supports_autograd=False, + decorators=[skipCUDAIf(not SM90OrLater or TEST_WITH_ROCM, 
'Requires CUDA SM >= 9.0')], + skips=( + # Sample inputs isn't really parametrized on dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda'), + # "mul_cuda" not implemented for float8_e4m3fn + # https://github.com/pytorch/pytorch/issues/107256 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.float8_e4m3fn,)), + ) + ), + OpInfo( + 'nn.functional.scaled_dot_product_attention', + op=lambda *args, **kwargs: + wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs), + sample_inputs_func=sample_inputs_scaled_dot_product_attention, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=False, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=[DecorateInfo(toleranceOverride( + {torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ], + skips=( + # When attn mask is a composite tensor this fails backward by returning a none + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + # This is only failing on Linux Bionic 3.10 Cuda 11.6 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', + dtypes=(torch.float32,)), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Forward works for dtype=float64 which is the math path + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # Not implemented for Forward AD + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + device_type='cpu'), + # Not implemented for backward derivative + DecorateInfo(unittest.skip("Skipped!"), 
'TestBwdGradients', 'test_fn_gradgrad', + device_type='cpu'), + # CPU and CUDA have inconsistencies for intermediate outputs + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cpu'), + # When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', + device_type='cpu'), + # OpInfo was implemented with a lambda + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO Need to understand what this is testing and why it doesn't work + DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'), + # TODO skip this for now since we can't skip on runtime arch support + DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'), + # skip for sm < 80 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + # FIXME + DecorateInfo(unittest.skip('test_cow_input does not work with efficient attention on ROCM'), + 'TestCompositeCompliance', 'test_cow_input', + device_type='cuda', 
dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.skip('test_fake_crossref_backward_amp does not work with efficient attention on ROCM'), + 'TestFakeTensor', 'test_fake_crossref_backward_amp', + device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.skip('test_fake_crossref_backward_no_amp does not work with efficient attention on ROCM'), + 'TestFakeTensor', 'test_fake_crossref_backward_no_amp', + device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + # registered in fake_impls.py instead of _meta_registrations.py, so meta kernels will fail. + # However, for implementations that fall back to the constituent ops, the meta kernels may not + # fail. Fused kernels will fail, whereas unfused kernels will not fail. + # All fused kernels support bf16 and fp16 - so if fused attention is supported, the test will fail. + # mem_eff_attention also supports fp32 - so if it is supported the test will fail. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.bfloat16, torch.float16), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + # TODO: float32 support in ROCM efficient attention + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bfloat16, torch.float16), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.bfloat16, torch.float16,), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION),), + ), + OpInfo( + 'torch.ops.aten._flash_attention_forward', + sample_inputs_func=sample_inputs_flash_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16) + if not SM80OrLater + else custom_types(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + check_batched_forward_grad=False, + decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")], + skips=( + # Device mismatch due to philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'), + # meta implementation is in fake_impls.py instead of being a meta registration + 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + # Checking the scalar value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + ) + ), + OpInfo( + 'torch.ops.aten._efficient_attention_forward', + sample_inputs_func=sample_inputs_efficient_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16, torch.float32) + if not SM80OrLater + else custom_types(torch.float16, torch.float32, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + check_batched_forward_grad=False, + # TODO: Skip because it produces a CUDA illegal memory access for some reason + skip_cow_input_backward=True, + # FIXME: mask_type == 2 (LowerRight) + decorators=[ + skipCUDAIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "This platform doesn't support efficient attention"), + skipCUDAIf(TEST_WITH_ROCM, "Efficient attention on ROCM doesn't support custom_mask_type==2")], + skips=( + # Device mismatch due to philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'), + # meta implementation is in fake_impls.py instead of being a meta registration + 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + # Checking the scaler value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + ) + ), + UnaryUfuncInfo( + 'nn.functional.silu', + aten_backward_name='silu_backward', + ref=lambda x, inplace=False: x / (1 + np.exp(-x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_autograd=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,), device_type='cpu'), + ), + autodiff_nonfusible_nodes=["aten::silu"], + ), + # TODO: combine this with the nn.functional.silu OpInfo when + # complex autodiff for silu is supported or when + # the forward bug is fixed + # Note: silu errors when given inputs that require grad + # but it doesn't support grad in their dtype + # This is why the dtypes list above passes test_dtypes, + # because it's getting lucky and failing in 
forward + # because test_dtypes sets requires_grad to True + # THIS IS A BUG + UnaryUfuncInfo( + 'nn.functional.silu', + variant_test_name='complex', + ref=lambda x, inplace=False: + x / (1 + np.exp(-x)), + dtypes=complex_types(), + dtypesIfCUDA=complex_types(), + supports_forward_ad=False, + supports_autograd=False, + assert_autodiffed=False, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,)), + # FIXME: intentionally misreports dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.complex64, torch.cdouble)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.complex64,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.complex64,)))), + UnaryUfuncInfo( + 'nn.functional.hardsigmoid', + aten_backward_name='hardsigmoid_backward', + ref=reference_hardsigmoid, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=False, + supports_forward_ad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], + skips=[ + # still want to test that first derivative works though second derivative isn't supported + 
DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad"), + # produces 0 instead of nan on ROCM + DecorateInfo(unittest.expectedFailure, + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + device_type='cuda', + active_if=(TEST_WITH_ROCM)), ] + ), + UnaryUfuncInfo( + 'nn.functional.logsigmoid', + aten_name="log_sigmoid", + aten_backward_name='log_sigmoid_backward', + ref=reference_logsigmoid, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_autograd=True, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + # autodiff_nonfusible_nodes=["aten::log_sigmoid"], + decorators=[ + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + ], + skips=( + # Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), + ), + ), + UnaryUfuncInfo( + 'nn.functional.mish', + aten_backward_name='mish_backward', + ref=lambda x: x * np.tanh(reference_softplus(x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.mish, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], + ), + UnaryUfuncInfo( + 'nn.functional.softsign', + ref=lambda x: x / (np.abs(x) + 1), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.int, torch.int8)),), + ), + UnaryUfuncInfo( + 'nn.functional.tanhshrink', + ref=lambda x: x - np.tanh(x), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05), + torch.bfloat16: 
tol(atol=1e-02, rtol=1.6e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + # tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan. + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0) + ), + UnaryUfuncInfo( + 'nn.functional.threshold', + ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype), + dtypes=all_types_and(torch.half, torch.bfloat16), + inplace_variant=lambda x, threshold, value: + torch.nn.functional.threshold(x, threshold, value, inplace=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}, + {'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}), + # TODO(whc) should not need sample_inputs_func, but without it + # kwargs aren't being hooked up properly + sample_inputs_func=sample_inputs_threshold, + ), + OpInfo( + "nn.functional.triplet_margin_loss", + sample_inputs_func=sample_inputs_triplet_margin_loss, + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + 
"nn.functional.triplet_margin_with_distance_loss", + sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # This test cannot handle a callable passed to `distance_function`. If we would use + # `distance_function=None`, the test would pass fine. + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + BinaryUfuncInfo('nextafter', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False), + OpInfo( + "to", + op=lambda x, *args, **kwargs: x.to(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_to, + skips=( + # RuntimeError: undefined value cpu + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + # NotImplementedError: Cannot copy out of meta tensor; no data! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestMeta", + "test_meta_outplace", + ), + # https://github.com/pytorch/pytorch/issues/84335 + DecorateInfo( + unittest.skip("Skipped!"), + "TestProxyTensorOpInfo", + "test_make_fx_symbolic_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + OpInfo('topk', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_topk), + # Multiple variants for batch_norm to test with and without cuDNN disabled + # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details + OpInfo('nn.functional.batch_norm', + aten_name='batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[1, 2], + allow_cow_input_materialize_backward=[1, 2], + sample_inputs_func=sample_inputs_batch_norm, + skips=( + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.bfloat16, torch.float16)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}), + 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), + )), + # This variant tests batch_norm with cuDNN disabled only on CUDA devices + OpInfo('nn.functional.batch_norm', + variant_test_name='without_cudnn', + aten_name='batch_norm', + dtypes=empty_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + allow_cow_input_materialize_forward=[1, 2], + 
allow_cow_input_materialize_backward=[1, 2], + decorators=[onlyCUDA, disablecuDNN], + skips=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}), + 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_batch_norm), + OpInfo( + "nn.functional.binary_cross_entropy", + aten_backward_name='binary_cross_entropy_backward', + sample_inputs_func=sample_inputs_binary_cross_entropy, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + gradcheck_fast_mode=False, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestCudaFuserOpInfo", + ), + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + ), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120783 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + ), + # RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5] + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + ), + skips=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + # We have to add 2 OpInfo entry for `igamma` and `igammac`.First is the + # standard entry, second is to run 
gradcheck tests on the second argument. + BinaryUfuncInfo('igamma', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammainc',), + dtypesIfCUDA=floating_types(), + # TODO: FIXME + supports_rhs_python_scalar=False, + supports_autograd=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implemented grad for both inputs + # BinaryUfuncInfo('igamma', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments. + # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # skips=( + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"),"), + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails are we permute the arguments function variant + # # but not for inplace or method. 
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + BinaryUfuncInfo('igammac', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammaincc',), + dtypesIfCUDA=floating_types(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implementing grad for both inputs + # BinaryUfuncInfo('igammac', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments + # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"), + # ], + # skips=( + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails are we permute the arguments function variant + # # but not for inplace or method. 
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + UnaryUfuncInfo('nn.functional.softshrink', + aten_name="softshrink", + aten_backward_name='softshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_softshrink, + error_inputs_func=error_inputs_softshrink), + UnaryUfuncInfo('nn.functional.hardshrink', + aten_name="hardshrink", + aten_backward_name='hardshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardshrink, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardshrink"]), + UnaryUfuncInfo('nn.functional.hardtanh', + aten_name="hardtanh", + aten_backward_name='hardtanh_backward', + dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16), + backward_dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardtanh, + error_inputs_func=error_inputs_hardtanh, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardtanh"]), + OpInfo('nn.functional.gelu', + aten_name="gelu", + aten_backward_name='gelu_backward', + ref=reference_gelu if TEST_SCIPY else None, + error_inputs_func=error_inputs_gelu, + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_gelu, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
autodiff_nonfusible_nodes=["aten::gelu"], + skips=( + # AssertionError: Tensor-likes are not close! + # May not replicate in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('nn.functional.relu6', + aten_name="relu6", + dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypes=floating_types_and(torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::relu6"]), + OpInfo('mm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('mode', + op=torch.mode, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Resized a non-empty tensor but did not warn about it + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + ), + sample_inputs_func=sample_inputs_mode,), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1', + domain=(1, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3', + domain=(2, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5', + domain=(3, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda 
device, dtype, input: ({'p': 5}, {'d': 5})), + BinaryUfuncInfo('ne', + ref=np.not_equal, + aliases=('not_equal',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('narrow', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False), + skips=( + # Use of .item() + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo('narrow_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + supports_autograd=False, + # https://github.com/pytorch/pytorch/issues/86931 + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False), + skips=( + # https://github.com/pytorch/pytorch/issues/84577 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Lazy tensor failures: mutating and aliasing ops should all have codegen'd kernels + 
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('view_copy', + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + ref=lambda x, newshape: np.reshape(x, newshape).copy(), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + sample_inputs_func=sample_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape), + UnaryUfuncInfo('neg', + aliases=('negative', ), + ref=np.negative, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + error_inputs_func=error_inputs_neg, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('dist', + op=torch.dist, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_dist), + OpInfo('outer', + op=torch.outer, + aliases=('ger', ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_outer,), + OpInfo('ormqr', + op=torch.ormqr, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_ormqr, + error_inputs_func=error_inputs_ormqr, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('permute', + ref=np.transpose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_varargs=True, + sample_inputs_func=sample_inputs_permute, + reference_inputs_func=reference_inputs_permute), + BinaryUfuncInfo('pow', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + ref=np.power, + # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled + # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently + # unsupported on CPU. 
+ backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponentes + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Skipping integers because they are being raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + # FIXME Complex values error with: Greatest absolute difference: nan at index + # Ref: https://github.com/pytorch/pytorch/issues/76853 + # For `chalf`, reference computation in `numpy` is computed in `cfloat`. + # Output of `chalf` saturates to `inf` quicker than reference due to its small range + # which leads to failure of this test. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + )), + BinaryUfuncInfo('float_power', + ref=np.float_power, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponentes + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), 
+ skips=( + # FIXME + # AssertionError: Object comparison failed: torch.float64 != torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # -3.43399e+38 is outside the range of representable values of type 'float' + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + # Inplace always promotes to double and thus other floating dtypes are not supported + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bfloat16, torch.float16, torch.float32]), + )), + OpInfo('qr', + op=torch.qr, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]), + UnaryUfuncInfo('rad2deg', + ref=np.degrees, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + UnaryUfuncInfo('real', + ref=np.real, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo( + "roll", + ref=np.roll, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + error_inputs_func=error_inputs_roll, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_roll, + decorators=(onlyNativeDeviceTypes,), + ), + OpInfo( + "rot90", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + error_inputs_func=error_inputs_rot90, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rot90, + ), + # To test reference numerics against multiple values of argument `decimals`, + # we make multiple OpInfo entries with each entry corresponding to different value of decimals. 
+ UnaryUfuncInfo('round', + ref=np.round, + aliases=('special.round',), + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + ), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_0', + aliases=('special.round',), + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + 
device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_neg_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('sin', + ref=np.sin, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, 
torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), + UnaryUfuncInfo('sinc', + ref=np_sinc_with_fp16_as_fp32, + aliases=('special.sinc',), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('sinh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + 
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('sign', + ref=reference_sign, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + )), + UnaryUfuncInfo('sgn', + ref=reference_sgn, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True), + OpInfo('split', + # Cannot declare this aten_name because of + # test_variant_consistency_jit_split_list_args_cpu_float32 + decomp_aten_name='split_with_sizes', + variant_test_name='list_args', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=partial(sample_inputs_split, list_args=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + # `unsafe_split` supports only `int` for split_size argument + OpInfo('unsafe_split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True, + check_batched_forward_grad=False), + OpInfo('split_with_sizes', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_split_with_sizes, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo('split_with_sizes_copy', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_split_with_sizes, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # No error raised + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_requires_grad_error"), + )), + BinaryUfuncInfo('__radd__', + op=torch.Tensor.__radd__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::add'],), + BinaryUfuncInfo('__rdiv__', + op=torch.Tensor.__rdiv__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + promotes_int_to_float=True, + lhs_make_tensor_kwargs={'exclude_zero': True}, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + skips=( + # https://github.com/pytorch/pytorch/issues/76806 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],), + BinaryUfuncInfo('__rmul__', + op=torch.Tensor.__rmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::mul'],), + BinaryUfuncInfo('__rand__', + op=torch.Tensor.__rand__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__ror__', + op=torch.Tensor.__ror__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__rxor__', + op=torch.Tensor.__rxor__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('__rmatmul__', + op=torch.Tensor.__rmatmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_noncontiguous_samples'), + 
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}), + "TestDecomp", "test_comprehensive", device_type="cuda", + active_if=TEST_WITH_ROCM), + ), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + BinaryUfuncInfo('__rmod__', + op=torch.Tensor.__rmod__, + dtypes=floating_types_and(torch.bfloat16, torch.half,), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + # Support autograd after torch.remainder(Tensor, Tensor) supports + # autograd of the second argument. 
+ # https://github.com/pytorch/pytorch/pull/58476/files#r637167630 + # supports_autograd=False, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::remainder'],), + BinaryUfuncInfo('__rpow__', + op=torch.Tensor.__rpow__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + # Reference: https://github.com/pytorch/pytorch/issues/54774 + # "log2" "_vml_cpu" not implemented for Half + backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # TODO: FIXME tolerance is too high + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'), + DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::pow'],), + BinaryUfuncInfo('__rsub__', + op=torch.Tensor.__rsub__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::rsub'],), + BinaryUfuncInfo('rsub', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_inplace_autograd=False, + assert_autodiffed=None, + sample_inputs_func=sample_inputs_add_sub), + OpInfo('select', + aten_backward_name='select_backward', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_select, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('select_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_select_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('slice', + op=torch.ops.aten.slice.Tensor, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_slice, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_scripting=False, + supports_inplace_autograd=False, + supports_out=False), + OpInfo('slice_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_slice_scatter, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Internal error: pybind11::error_already_set called while + # Python error indicator not set. 
+ # TODO: Investigate this more + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_out'), + ), + supports_out=True), + UnaryUfuncInfo('signbit', + ref=np.signbit, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False,), + UnaryUfuncInfo('tan', + ref=np.tan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), + UnaryUfuncInfo('tanh', + ref=np.tanh, + aten_backward_name='tanh_backward', + aliases=('nn.functional.tanh',), + decorators=(precisionOverride({torch.bfloat16: 1e-2}), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(j * pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + OpInfo('tensor_split', + ref=np.array_split, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + ), + sample_inputs_func=sample_inputs_tensor_split,), + OpInfo('hsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_hsplit, + error_inputs_func=error_inputs_hsplit,), + OpInfo('vsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_vsplit, + error_inputs_func=error_inputs_vsplit,), + OpInfo('dsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + 
check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_dsplit, + error_inputs_func=error_inputs_dsplit,), + OpInfo('triangular_solve', + op=torch.triangular_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_legacy_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # Gradcheck fails + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=floating_and_complex_types()), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + UnaryUfuncInfo('trunc', + aliases=('fix', ), + ref=np.trunc, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + UnaryUfuncInfo('exp2', + aliases=('special.exp2', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('expm1', + aliases=('special.expm1', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + assert_autodiffed=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('nan_to_num', + ref=np.nan_to_num, + dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # Passing numpy_kwargs via sample_kwargs, as numpy does comparison + # with BFloat16 in float, since it currently doesn't support BFloat16. 
+ # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 + sample_kwargs=lambda device, dtype, input: ({}, + {'posinf': torch.finfo(torch.bfloat16).max, + 'neginf': torch.finfo(torch.bfloat16).min}) + if dtype is torch.bfloat16 else ({}, {})), + UnaryUfuncInfo('reciprocal', + ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('rsqrt', + ref=lambda x: np.reciprocal(np.sqrt(x)), + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.half: 5e-2}),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + )), + UnaryUfuncInfo('sqrt', + ref=np.sqrt, + supports_sparse=True, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('square', + ref=np.square, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + # >>> t = torch.tensor(complex(-0.01, float("inf"))) + # >>> np.square(t.numpy()) + # (-inf-infj) + # >>> t.square() + # tensor(-inf-infj) + # >>> t.cuda().square() + # tensor(inf+nanj, device='cuda:0') + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=[torch.bool]), + ),), + OpInfo('lerp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_lerp, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('angle', + ref=np.angle, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_complex_to_float=True, + skips=( + # Ref: https://github.com/pytorch/pytorch/issues/78413 + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),), + )), + UnaryUfuncInfo('isfinite', + ref=np.isfinite, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isinf', + ref=np.isinf, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isposinf', + ref=np.isposinf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isneginf', + ref=np.isneginf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isreal', + ref=np.isreal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isnan', + ref=np.isnan, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + 
supports_autograd=False), + OpInfo('einsum', + # we need this lambda because SampleInput expects tensor input as the first argument + # TODO(@heitorschueroff) update SampleInput to handle such cases + op=lambda tensors, equation: torch.einsum(equation, tensors), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + sample_inputs_func=sample_inputs_einsum, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # test does not work with passing lambda for op + # there's a test `test_einsum` in `test_jit.py` to handle this case + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('svd', + op=torch.svd, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_svd, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('svd_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + # Due to the use of randomness + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_svd_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.complex64: tol(atol=1e-02, rtol=1e-02)}), + 'TestCommon', 'test_noncontiguous_samples'), + # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
+ # DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), + # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + DecorateInfo(unittest.skip("See comment above"), + 'TestFwdGradients', + 'test_fn_fwgrad_bwgrad', + dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("See comment above"), + 'TestBwdGradientsCUDA', + 'test_fn_gradgrad', + dtypes=[torch.complex128]), + ], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('pca_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_pca_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.complex64: tol(atol=4e-02, rtol=4e-02)}), + 'TestCommon', 'test_noncontiguous_samples'), + # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
+ # DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), + # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + DecorateInfo(unittest.skip("See comment above"), + 'TestFwdGradients', + 'test_fn_fwgrad_bwgrad', + dtypes=[torch.complex128]), + + ], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('polar', + dtypes=floating_types(), + # this function is undefined if 'abs' values are <0 + supports_forward_ad=True, + lhs_make_tensor_kwargs=dict(low=0), + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 + # Numerical: + # tensor([[0.]], dtype=torch.float64) + # Analytical: + # tensor([[-0.0047]], dtype=torch.float64, grad_fn=) + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + )), + # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. + # To test reference numerics against multiple values of argument `n`, + # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). 
+ # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_0', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)), + *(UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name=f'polygamma_n_{n_}', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + decorators=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-3)}), 'TestUnaryUfuncs'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e1, rtol=1e-1), + torch.float32: tol(atol=1e-4, rtol=1e-2)}), + 'TestUnaryUfuncs', 'test_reference_numerics_normal', + active_if=IS_WINDOWS), + ), + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + sample_kwargs=lambda device, dtype, input: ({'n': n_}, {'n': n_}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)) + for n_ in (1, 2, 3, 4)), + OpInfo('ravel', + ref=np.ravel, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_ravel, + ), + OpInfo('unravel_index', + ref=np.unravel_index, + dtypes=integral_types_and(), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_unravel_index, + ), + OpInfo('reshape', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('reshape_as', + op=lambda x, other: x.reshape_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + 
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('view', + op=lambda x, shape: x.view(shape), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('view_as', + op=lambda x, other: x.view_as(other), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides") + )), + OpInfo('atleast_1d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_atleast1d2d3d, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + ), + OpInfo('atleast_2d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('atleast_3d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + 
OpInfo('flatten', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_flatten, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_flatten, + reference_inputs_func=reference_inputs_flatten, + ), + OpInfo('unflatten', + op=torch.unflatten, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_unflatten, + ), + OpInfo('column_stack', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),), + sample_inputs_func=sample_inputs_column_stack,), + OpInfo('pinverse', + op=torch.pinverse, + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_invertible, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('gather', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + 
sample_inputs_func=sample_inputs_gather, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_gather, + ), + OpInfo('index_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'), + ), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True)), + OpInfo('index_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_select', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_select, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + 
OpInfo('index_add', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_add, + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + *(OpInfo('index_reduce', + variant_test_name=reduction_type, + dtypes=all_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive'), + ), + supports_out=True, + sample_inputs_func=sample_inputs_index_reduce, + ) for reduction_type in ('mean', 'prod', 'amin', 'amax')), + OpInfo('__getitem__', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + supports_scripting=False, + op=torch.Tensor.__getitem__, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 104448 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), + sample_inputs_func=sample_inputs_getitem), + OpInfo('index_put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + test_neg_view=False, + sample_inputs_func=sample_inputs_index_put, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64], + device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)), + )), + OpInfo('sort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + )), + OpInfo('unique', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.uint16, torch.uint32, torch.uint64), + sample_inputs_func=sample_inputs_unique, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('unique_consecutive', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique_consecutive, + supports_out=False, 
+ supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, # vmap complains of the sizes + sample_inputs_func=sample_inputs_put), + OpInfo('take', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + check_batched_grad=False, # vmap complains of the sizes + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_take, + error_inputs_func=error_inputs_take), + OpInfo('scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter, + error_inputs_func=error_inputs_scatter_and_scatter_add), + UnaryUfuncInfo( + 'bfloat16', + op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'bool', + op=lambda x, *args, **kwargs: x.bool(*args, **kwargs), + 
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attributis not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'byte', + op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'char', + op=lambda x, *args, **kwargs: x.char(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'double', + op=lambda x, *args, **kwargs: x.double(*args, **kwargs), + 
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'float', + op=lambda x, *args, **kwargs: x.float(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'half', + op=lambda x, *args, **kwargs: x.half(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=True, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'int', + op=lambda x, *args, **kwargs: x.int(*args, **kwargs), + 
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'long', + op=lambda x, *args, **kwargs: x.long(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'short', + op=lambda x, *args, **kwargs: x.short(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'cdouble', + op=torch.Tensor.cdouble, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, 
torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'cfloat', + op=torch.Tensor.cfloat, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'chalf', + op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + # use of lambda doesn't work with test_normalize_operator_exhaustive + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_variant_consistency_eager', + device_type='cpu'), + # TypeError: 'int' object is not iterable + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + OpInfo('empty_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + reference_inputs_func=reference_inputs_like_fns, + supports_autograd=False, + skips=( + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), + "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('zeros_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + )), + OpInfo('ones_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + )), + OpInfo('randn', + dtypes=floating_and_complex_types_and(torch.half, 
torch.bfloat16, torch.complex32), + op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs), + supports_out=True, + sample_inputs_func=sample_inputs_randn, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randn generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + # randn fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('randn_like', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + 
error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('rand_like', + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('randint', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda *args, **kwargs: + wrapper_set_seed(torch.randint, 
*args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randint generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # randint fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices', + dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM), + )), + OpInfo('randint_like', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint_like, + supports_autograd=False, + skips=( + 
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('full_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_full_like, + supports_autograd=False, + skips=( + )), + OpInfo('new_zeros', + op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('new_ones', + op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('ones', + op=torch.ones, + supports_autograd=False, + supports_varargs=True, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + 
# Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('zeros', + op=torch.zeros, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('full', + op=torch.full, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_full, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # RuntimeError: UNSUPPORTED DTYPE: bool + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('new_empty', + op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + supports_autograd=False), + OpInfo('new_empty_strided', + op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True), + supports_autograd=False, + skips=( + # FX failed to normalize op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Lazy tensor failures + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 
'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestNNCOpInfo', 
'test_nnc_correctness'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('empty_strided', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.half), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_empty_strided, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), + # Lazy tensor failures + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), + # RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single + # memory location. Please clone() the tensor before performing the operation. 
+ DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('empty', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('eye', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_eye, + error_inputs_func=error_inputs_eye, + supports_out=True, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO: same as this? 
+ # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('empty_permuted', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty_permuted, + error_inputs_func=error_inputs_empty_permuted, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('scalar_tensor', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_scalar_tensor, + supports_autograd=False, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 
'test_normalize_operator_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('new_full', + op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_full, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('multinomial', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_multinomial, + error_inputs_func=error_inputs_multinomial, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Strides are not the same! 
+ # This may not be reproducible in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_autograd=False), + OpInfo('normal', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.normal, inp, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_first, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # RuntimeError: Difference from {dtype} is larger with decomposition + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # The inplace variant (Tensor.normal_) is different from torch.normal + # inplace varaint Tensor.normal_ is decomposed using randn_like() + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))), + OpInfo('normal', + # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here + variant_test_name='number_mean', + op=lambda std, mean, *args, **kwargs: + wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_second, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 
'test_out_warning'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # AssertionError in CUDA variant + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))), + OpInfo('bernoulli', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), + # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli + inplace_variant=None, + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_bernoulli, + error_inputs_func=error_inputs_bernoulli, + skips=( + # vmap: We do not yet support calling random operations inside of vmap + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Expected RuntimeError when doing an unsafe cast from a result of + # dtype torch.float32 into an out= with dtype 
torch.lon + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + OpInfo('scatter_add', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_add, + error_inputs_func=error_inputs_scatter_and_scatter_add, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('stack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_stack, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # https://github.com/pytorch/pytorch/issues/77046 + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ), + ), + OpInfo('_chunk_cat', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_chunk_cat, + error_inputs_func=error_inputs_chunk_cat, + supports_autograd=False, + supports_out=True, + ), + OpInfo('hstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + BinaryUfuncInfo('hypot', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False), + OpInfo('histogram', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU + 
sample_inputs_func=sample_inputs_histogram, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Not Implemented on XLA. + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), + )), + OpInfo('histogramdd', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU + sample_inputs_func=sample_inputs_histogramdd, + error_inputs_func=error_inputs_histogramdd, + supports_autograd=False, + skips=( + # Not implemented on CUDA + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('histc', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), + sample_inputs_func=sample_inputs_histc, + supports_out=True, + supports_autograd=False, + skips=( + # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor + # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast + # from a result of dtype torch.float32 into an out= with dtype torch.long" + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('bincount', + dtypes=integral_types_and(), + sample_inputs_func=sample_inputs_bincount, + 
supports_out=False, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('bucketize', + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_bucketize, + reference_inputs_func=reference_inputs_bucketize, + error_inputs_func=error_inputs_bucketize, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('searchsorted', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_searchsorted, + supports_autograd=False, + ref=reference_searchsorted, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cat', + ref=_cat_np, + aliases=('concat', 'concatenate'), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + sample_inputs_func=sample_inputs_cat_concat, + reference_inputs_func=reference_inputs_cat, + error_inputs_func=error_inputs_cat, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + assert_autodiffed=True, + skips=( + # https://github.com/pytorch/pytorch/issues/89353 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: Arguments for call not valid. 
+ # Expected a value of type 'List[Tensor]' for argument + # 'tensors' but instead found type 'Tensor (inferred)'. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # see https://github.com/pytorch/pytorch/issues/99806 + # RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0. + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + )), + OpInfo('unbind', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + ref=reference_unbind, + sample_inputs_func=sample_inputs_unbind, + error_inputs_func=error_inputs_unbind, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + supports_out=False, + ), + OpInfo('vstack', + aliases=('row_stack',), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: _fn() Expected a value of type + # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), + OpInfo('dstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo('unfold', + op=lambda x, *args: x.unfold(*args), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ), + sample_inputs_func=sample_inputs_unfold), + OpInfo('unfold_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_unfold), + OpInfo('msort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + 
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_msort, + skips=( + )), + OpInfo('movedim', + aliases=('moveaxis',), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_movedim_moveaxis, + reference_inputs_func=reference_movedim_moveaxis, + error_inputs_func=error_movedim_moveaxis), + OpInfo('renorm', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_renorm, + error_inputs_func=error_inputs_renorm, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Difference from float64 is larger with decomposition + # linalg_vector_norm.default than original on output 0. 
+ # Original max diff: 2.560596747969157e-07, + # Decomp max diff: 1.8187482915266173e-06 + DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive', + device_type='cpu', dtypes=(torch.float16,)), + )), + ShapeFuncInfo('repeat', + op=lambda x, dims: x.repeat(dims), + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('squeeze', + ref=_squeeze_ref, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze), + OpInfo('squeeze', + ref=_squeeze_ref, + variant_test_name="multiple", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze_multiple), + UnaryUfuncInfo( + 'fill', 
+ ref=_fill_np, + method_variant=None, + sample_kwargs=_fill_sample_kwargs, + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + skips=( + # JIT has issue when op is passed as lambda + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), + )), + OpInfo('resize_', + op=lambda x, shape: x.clone().resize_(shape), + method_variant=None, + inplace_variant=torch.Tensor.resize_, + # the test fails because resize_ doesn't work with imag views as expected by the test + # https://github.com/pytorch/pytorch/issues/65945 + test_neg_view=False, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('resize_as_', + op=lambda x, other: torch.resize_as_(x.clone(), other), + method_variant=None, + inplace_variant=torch.Tensor.resize_as_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + 
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('take_along_dim', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_take_along_dim, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + ShapeFuncInfo('tile', + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile), + OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('trapezoid', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('cumulative_trapezoid', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See 
https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_cumulative_trapezoid,), + OpInfo('unsqueeze', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + sample_inputs_func=sample_unsqueeze), + BinaryUfuncInfo('xlogy', + aliases=('special.xlogy',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test 0 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=0.01)), + OpInfo('zero_', + op=lambda x: torch.zero_(x.clone()), + method_variant=None, + inplace_variant=torch.Tensor.zero_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_zero_), + OpInfo('logsumexp', + aliases=('special.logsumexp',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_fast_mode=False, + sample_inputs_func=sample_inputs_logsumexp, + reference_inputs_func=reference_inputs_logsumexp), + OpInfo('trace', + 
dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + error_inputs_func=error_inputs_trace, + supports_inplace_autograd=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_trace), + OpInfo('transpose', + ref=_numpy_ref_transpose, + aliases=('swapdims', 'swapaxes'), + assert_jit_shape_analysis=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_transpose_swapdims), + OpInfo('T', + op=lambda x: x.T, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T, + error_inputs_func=error_inputs_T), + OpInfo('H', + op=lambda x: x.H, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T), + OpInfo('mT', + op=lambda x: x.mT, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input 
sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('mH', + op=lambda x: x.mH, + aliases=('adjoint',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('tril', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 
'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('tril_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('kron', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kron, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('inner', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + 
sample_inputs_func=sample_inputs_inner, + ), + OpInfo('tensordot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_tensordot, + skips=( + # Skip operator schema test because this is a functional and not an operator. + # Reference: https://github.com/pytorch/pytorch/issues/54574 + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ) + ), + OpInfo('to_sparse', + op=lambda x, *args: x.to_sparse(*args), + sample_inputs_func=sample_inputs_to_sparse, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + backward_dtypes=floating_types(), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_sparse_csr=True, + supports_sparse_csc=True, + check_batched_grad=False, + check_batched_gradgrad=False, + skips=( + # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend + DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), + # TODO: FIXME: complex inputs requiring grad error in forward + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Allowed exception: sparse tensors don't have strides + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'), + # TODO: implement 
csr.to_sparse(sample_dim) where sampled_dim is 1. + DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), + 'TestSparseCSR', 'test_sparse_csr_consistency'), + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ) + ), + OpInfo('logcumsumexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), + # RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble' + # Falling back to non-numerically stablized exp, causing nan in the results. 
+ DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), + ), + sample_inputs_func=sample_inputs_logcumsumexp, + error_inputs_func=error_inputs_logcumsumexp), + UnaryUfuncInfo('sigmoid', + aliases=('special.expit', 'nn.functional.sigmoid'), + aten_backward_name='sigmoid_backward', + ref=reference_sigmoid if TEST_SCIPY else None, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble])), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + assert_autodiffed=True, + # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 1j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + UnaryUfuncInfo('digamma', + ref=scipy.special.digamma if TEST_SCIPY else None, + aliases=('special.psi', 'special.digamma',), + decorators=(precisionOverride({torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erf', + ref=scipy.special.erf if 
TEST_SCIPY else None, + aliases=('special.erf', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + + ), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfc', + ref=scipy.special.erfc if TEST_SCIPY else None, + aliases=('special.erfc', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfinv', + ref=scipy.special.erfinv if TEST_SCIPY else None, + aliases=('special.erfinv', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + domain=(-1, 1), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < 
version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + )), + OpInfo("nn.functional.smooth_l1_loss", + ref=reference_smooth_l1_loss, + sample_inputs_func=sample_inputs_smooth_l1_loss, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), + OpInfo( + "nn.functional.l1_loss", + ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), + sample_inputs_func=sample_inputs_l1_loss, + error_inputs_func=error_inputs_l1_loss, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
+ DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + UnaryUfuncInfo('lgamma', + ref=reference_lgamma if TEST_SCIPY else None, + aliases=('special.gammaln', ), + decorators=(precisionOverride({torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + # lgamma have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + OpInfo( + 'logdet', + dtypes=floating_and_complex_types(), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + # `log_softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ OpInfo( + 'log_softmax', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + aten_backward_name='_log_softmax_backward_data', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo( + 'log_softmax', + variant_test_name='with_dtype', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('logit', + aten_backward_name='logit_backward', + ref=scipy.special.logit if TEST_SCIPY else None, + domain=(0, 1), + aliases=('special.logit', ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_logit), + OpInfo('where', + # Currently only the `input` is tested in gradcheck. + # If we pass `condition` first, none of the input which supports + # autograd will be tested. Hence the following lambda. 
+ op=lambda self, condition, other, **kwargs: torch.where(condition, self, other, **kwargs), + ref=lambda self, condition, other: np.where(condition, self, other), + sample_inputs_func=sample_inputs_where, + reference_inputs_func=reference_inputs_where, + error_inputs_func=error_inputs_where, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + ), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), + OpInfo('nonzero', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # nonzero(): argument 'out' must be Tensor, not tuple + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67458 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # nonzero is not raising a warning when the out is resized + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Can't find schemas for this operator for some reason + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('nonzero_static', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero_static, + supports_out=False, + supports_autograd=False, + decorators=[onlyCPU], + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + # Following tests are for jiterator's python interface + # Jiterator can be used to author elementwise CUDA kernel + # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op + # See create_jit_fn in jiterator.py for more information + UnaryUfuncInfo( + 'jiterator_unary', + op=torch.cuda.jiterator._create_jit_fn("template T unary(T x) { return x * x + x; }"), + ref=lambda x: x * x + x, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[ + onlyCUDA, + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_hard'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 
'test_reference_numerics_normal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + ], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=[torch.bool]), + # ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.complex64], active_if=TEST_WITH_ROCM), + # Expected failure: torch.jiterator_unary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, 
torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_binary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_4inputs_with_extra_args', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", + alpha=1, beta=1), + ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should 
expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary_return_by_ref', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_return_by_ref(T i0, T i1, T& out0) { + out0 = i0 + i1; + } + """, + num_outputs=1), + ref=operator.add, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_2inputs_2outputs', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_2outputs(T i0, T i1, T& out0, T& out1) { + out0 = i0 + i1; + out1 = i0 - i1; + } + """, + 
num_outputs=2), + ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + # `torch.norm` has multiple code paths depending on the value of `p`. + # These paths have different dtype support. Also JIT supports, + # most variants but not all of them. So we split the OpInfo entries, + # for `norm` based on the code-paths and JIT support. + OpInfo( + "norm", + sample_inputs_func=sample_inputs_norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # TODO Benchmark again with the new implementation + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. 
Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='nuc', + sample_inputs_func=sample_inputs_norm_nuc, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + check_batched_gradgrad=False, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types(), + skips=( + # Dispatches in Python to matrix_norm. Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='fro', + sample_inputs_func=sample_inputs_norm_fro, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # MPS has some mild accuracy issues for float16. We divide the tolerances by 10 + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}), + 'TestConsistency', + 'test_output_match', + + ), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Dispatches in Python to vector_norm. 
Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo( + "norm", + variant_test_name="inf", + sample_inputs_func=sample_inputs_norm_inf, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + # fast gradcheck produces NaNs + gradcheck_fast_mode=False, + skips=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)) + ), + ), + OpInfo('t', + sample_inputs_func=sample_inputs_t, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + assert_autodiffed=True, + error_inputs_func=error_inputs_t), + OpInfo( + "nn.functional.dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Probably because we have used lambda for the op here + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'), + # inplace variant dispatches to dropout kernel, while on CUDA + # the op dispatches to _fused_dropout (with a few more conditions) + # hence, different values and this skip here + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "native_dropout_backward", + op=torch.ops.aten.native_dropout_backward.default, + aten_name="native_dropout_backward", + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_dropout_backward, + skips=( + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # Lazy tensor failures + DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # These tests fail only when built with ASAN + DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 'test_correctness', active_if=TEST_WITH_ASAN), + DecorateInfo( + unittest.skip("Fails with ASAN"), + 'TestLazyOpInfo', + 'test_correctness_with_reusing_ir', + active_if=TEST_WITH_ASAN + ), + ), + ), + OpInfo( + "nn.functional.dropout2d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 
'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (3, 4) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.dropout3d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + check_batched_forward_grad=False, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True), + skips=( + 
# lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: Tensor-likes are not close! + # Fails in cuda11.7 + # Error Log: https://github.com/pytorch/pytorch/actions/runs/3440108478/jobs/5738475757 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),), + # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype + # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="with_train", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # vmap: We do not yet support calling random operations inside of vmap. 
+ # Please perform random operations outside of vmap as a workaround + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="without_train", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=partial(sample_inputs_dropout, train=False), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.one_hot", + ref=reference_one_hot, + supports_out=False, + dtypes=_dispatch_dtypes((torch.int64,)), + sample_inputs_func=sample_inputs_one_hot, + ), + OpInfo( + "nn.functional.embedding", + aten_backward_name="embedding_dense_backward", + # We use lambda to reshuffle the positional arguments. 
+ # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. + op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_embedding, + allow_cow_input_materialize_forward=[0], + error_inputs_func=error_inputs_embedding, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Fails on CI https://github.com/pytorch/pytorch/issues/85377 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + # Reference: https://github.com/pytorch/pytorch/issues/67084 + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + # Not a problem: embedding does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + supports_expanded_weight=True, + supports_out=False, + ), + OpInfo( + "nn.functional.embedding_bag", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. 
+ op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + # backward is not supported for mode `max` and dtype `bfloat16` + backward_dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_embedding_bag, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + supports_gradgrad=False, + allow_cow_input_materialize_forward=[0], + ), + OpInfo( + "nn.functional.multi_head_attention_forward", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_multi_head_attention_forward, + skips=( + # Tensor-likes are not close + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'), + + # TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention) + DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'), + # randomness + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # lambda impl + # AssertionError: JIT Test does not execute any logic + 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # tests running very slowly break slow tests, so we skip them instead of using `slowTest`. + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_comprehensive', + dtypes=(torch.bfloat16, torch.float16), + ), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_quick', + dtypes=(torch.bfloat16, torch.float16))), + supports_out=False, + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + ), + UnaryUfuncInfo( + "nn.functional.softplus", + aten_backward_name='softplus_backward', + ref=reference_softplus, + sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + decorators=( + DecorateInfo( + toleranceOverride + ({ + torch.half: tol(atol=1e-2, rtol=1e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1e-2), + }), + 'TestUnaryUfuncs'), + ), + ), + OpInfo( + "nn.functional.mse_loss", + aten_backward_name='mse_loss_backward', + ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), + sample_inputs_func=sample_inputs_loss, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
dtypes=floating_types_and(torch.float16), + backward_dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.grid_sample", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sample, + reference_inputs_func=reference_inputs_grid_sample, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15), + # TODO: delete this OpInfo once we add meta support for grid_sampler_3d + OpInfo( + "grid_sampler_2d", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sampler_2d, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15), + OpInfo( + "argwhere", + ref=np.argwhere, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_argwhere, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ), + ), + ReductionOpInfo( + 'all', + identity=True, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.all), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'any', + identity=False, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.any), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'amax', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amax), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'amin', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amin), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + 
error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'argmax', + supports_multiple_dims=False, + supports_autograd=False, + assert_jit_shape_analysis=True, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + ), + ReductionOpInfo( + 'argmin', + supports_multiple_dims=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + ), + ReductionOpInfo( + 'count_nonzero', + identity=0, + supports_out=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_reduction_count_nonzero, + ref=reference_reduction_numpy(np.count_nonzero), + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionOpInfo( + 'mean', + nan_policy='propagate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # FIXME: mean needs 'dim' parameter when using the 'out' overload. 
+ # Adding it with 'generate_args_kwargs' does not work, since these also get passed + # onto the reference implementations. + supports_out=False, + assert_autodiffed=True, + assert_jit_shape_analysis=True, + promotes_int_to_float=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.mean), + error_inputs_func=error_inputs_mean, + skips=( + # FIXME: mean does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'nanmean', + nan_policy='omit', + assert_autodiffed=True, + promotes_int_to_float=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nanmean), + skips=( + # AssertionError: False is not true : + # Failure in testing nodes' autodifferentiation. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + device_type='cuda', dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'std', + nan_policy='propagate', + supports_out=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.std), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + ReductionOpInfo( + 'std', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'var', + nan_policy='propagate', + supports_out=True, + assert_autodiffed=True, + promotes_int_to_float=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.var), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), + # NumPy is giving NaN for this + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), + ), + ), + ReductionOpInfo( + 'var', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + 
promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'prod', + identity=1, + nan_policy='propagate', + supports_multiple_dims=False, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_prod, + ref=prod_numpy, + skips=( + # FIXME: prod does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: prod does not support passing None to dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.uint8, torch.float16, torch.complex64]), + # FIXME: ValueError: The data in MaskedTensor a and Tensor b 
do not match + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float16]), + ), + ), + ReductionOpInfo( + 'sum', + identity=0, + nan_policy='propagate', + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_reduction_numpy(np.sum), + error_inputs_sparse_func=error_inputs_sparse_reduction_sum, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsc), + skips=( + # FIXME: sum does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + ReductionOpInfo( + 'nansum', + identity=0, + nan_policy='omit', + supports_out=True, + promotes_int_to_int64=True, + 
supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nansum), + skips=( + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: nansum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: flaky test so skipped instead of xfailed + # possibly bad low precision reference in numpy + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + ), + ), + OpInfo( + "nn.functional.ctc_loss", + dtypes=floating_types(), + supports_out=False, + sample_inputs_func=sample_inputs_ctc_loss, + skips=( + # https://github.com/pytorch/pytorch/issues/67462 + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_grad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + # Ref: https://github.com/pytorch/pytorch/issues/85231 + DecorateInfo(unittest.skip("Fails with ASAN"), + 'TestProxyTensorOpInfo', + 'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN), + ), + ), + OpInfo( + 
"nn.functional.cosine_embedding_loss", + dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_embedding_loss, + ), + OpInfo( + "nn.functional.nll_loss", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_nll_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + skips=( + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0, i1): + # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120782 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + DecorateInfo(unittest.skip("FP16 nll_loss cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + + ), + ), + OpInfo( + "nn.functional.gaussian_nll_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_gaussian_nll_loss, + error_inputs_func=error_inputs_gaussian_nll_loss, + skips=( + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # JIT does not 
support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.hinge_embedding_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_hinge_embedding_loss, + error_inputs_func=error_inputs_hinge_embedding_loss, + reference_inputs_func=reference_inputs_hinge_embedding_loss, + ), + OpInfo( + "nn.functional.huber_loss", + aten_backward_name='huber_loss_backward', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_huber_loss, + error_inputs_func=error_inputs_huber_loss, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ) + ), + OpInfo( + "nn.functional.pdist", + ref=reference_pdist, + sample_inputs_func=sample_inputs_pdist, + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + skips=( + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + ) + ), + OpInfo( + "nn.functional.poisson_nll_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_poisson_nll_loss, + error_inputs_func=error_inputs_poisson_nll_loss, + ), + OpInfo( + "argsort", + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + OpInfo( + "repeat_interleave", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_repeat_interleave, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pairwise_distance", + ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( + np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) + ), + sample_inputs_func=sample_inputs_pairwise_distance, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + 
supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_shuffle", + sample_inputs_func=sample_inputs_pixel_shuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_unshuffle", + sample_inputs_func=sample_inputs_pixel_unshuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.channel_shuffle", + sample_inputs_func=sample_inputs_channel_shuffle, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + backward_dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + skips=( + # Skip due to NotImplementedError for MPS device. 
+ DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + OpInfo( + "nn.functional.kl_div", + sample_inputs_func=sample_inputs_kl_div, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "diagflat", + ref=lambda input, offset=0: np.diagflat(input, k=offset), + sample_inputs_func=sample_inputs_diagflat, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='sum', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='prod', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + 
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_reduce, + skips=( + # Not implemented + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + ), + ), + OpInfo( + 'scatter_reduce', + variant_test_name='mean', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amin', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amax', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='lengths', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=sample_inputs_segment_reduce, + skips=( + # FIXME: CUDA driver API confirmed a leak in + # 
__main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='offsets', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'), + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), +] +op_db += opinfo.definitions.op_db + + +# Separate registry for experimental Python Reference OpInfos. +python_ref_db = [ + # + # Elementwise Unary OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.abs", + torch_opinfo_name="abs", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acos", + torch_opinfo_name="acos", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign 
on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acosh", + torch_opinfo_name="acosh", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asin", + torch_opinfo_name="asin", + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: 
tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asinh", + torch_opinfo_name="asinh", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + PythonRefInfo( + "_refs.lerp", + torch_opinfo_name="lerp", + ), + 
PythonRefInfo( + "_refs.ones", + torch_opinfo_name="ones", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.zeros", + torch_opinfo_name="zeros", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.cauchy", + torch_opinfo_name="cauchy", + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.exponential", + torch_opinfo_name="exponential", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.geometric", + torch_opinfo_name="geometric", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.log_normal", + torch_opinfo_name="log_normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + torch_opinfo_variant_name="number_mean", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal_", + op=torch.Tensor.normal_, + torch_opinfo_name="normal", + torch_opinfo_variant_name="in_place", + supports_out=False, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.arange", + torch_opinfo_name="arange", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, 
torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + 
device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="variadic_tensors", + ), + PythonRefInfo( + "_refs.take_along_dim", + torch_opinfo_name="take_along_dim", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.to", + torch_opinfo_name="to", + ), + PythonRefInfo( + "_refs.triu", + torch_opinfo_name="triu", + ), + PythonRefInfo( + "_refs.tril", + torch_opinfo_name="tril", + ), + PythonRefInfo( + "_refs.triu_indices", + torch_opinfo_name="triu_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.tril_indices", + torch_opinfo_name="tril_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="list_of_tensors", + ), + PythonRefInfo( + "_refs.movedim", + aliases=('moveaxis',), + torch_opinfo_name="movedim", + ), + PythonRefInfo( + "_refs.bucketize", + torch_opinfo_name="bucketize", + 
skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor with + # aten._local_scalar_dense.default - erroring out! [...] + # triggered by mid_val = boundaries[mid] + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"), + ) + ), + PythonRefInfo( + "_refs.equal", + torch_opinfo_name="equal", + skips=( + # RuntimeError: Cannot cast FakeTensor to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atan", + torch_opinfo_name="atan", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atanh", + torch_opinfo_name="atanh", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.bitwise_not", + torch_opinfo_name="bitwise_not", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.ceil", + torch_opinfo_name="ceil", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.item", + torch_opinfo_name="item", + skips=( + # RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # ValueError: Can't convert a tensor with 10 elements to a number! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj_physical", + torch_opinfo_name="conj_physical", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cos", + torch_opinfo_name="cos", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cosh", + torch_opinfo_name="cosh", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.digamma", + torch_opinfo_name="digamma", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erf", + torch_opinfo_name="erf", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfinv", + torch_opinfo_name="erfinv", + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfc", + torch_opinfo_name="erfc", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp", + torch_opinfo_name="exp", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.expm1", + torch_opinfo_name="expm1", + ), + 
ElementwiseUnaryPythonRefInfo( + "_refs.exp2", + torch_opinfo_name="exp2", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.fill", + torch_opinfo_name="fill", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.floor", + torch_opinfo_name="floor", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frexp", + torch_opinfo_name="frexp", + # Skipped due to numerical failures on Windows CI. + # This is also skipped in frexp earlier in the file. 
+ skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frac", + torch_opinfo_name="frac", + skips=( + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.imag", + torch_opinfo_name="imag", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isfinite", + torch_opinfo_name="isfinite", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isinf", + torch_opinfo_name="isinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isposinf", + torch_opinfo_name="isposinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isneginf", + torch_opinfo_name="isneginf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isnan", + torch_opinfo_name="isnan", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isreal", + torch_opinfo_name="isreal", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.i0", + torch_opinfo_name="i0", + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.int8,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.lgamma", + torch_opinfo_name="lgamma", + decorators=(precisionOverride({torch.float16: 7e-1}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), 
+ ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_1", + skips=skips_mvlgamma(), + decorators=( + DecorateInfo(torch.testing._internal.common_utils.markDynamoStrictTest, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + DecorateInfo(torch.testing._internal.common_utils.xfailIfTorchDynamo, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_3", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_5", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log", + torch_opinfo_name="log", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log1p", + torch_opinfo_name="log1p", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log10", + torch_opinfo_name="log10", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log2", + torch_opinfo_name="log2", + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + PythonRefInfo( + "_refs.logsumexp", + torch_opinfo_name="logsumexp", + # When keepdim=False logsumexp function uses squeeze operation + # 
that is not yet exposed in nvFuser's Python API. + ), + PythonRefInfo( + "_refs.log_softmax", + torch_opinfo_name="log_softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nan_to_num", + torch_opinfo_name="nan_to_num", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.neg", + torch_opinfo_name="neg", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.positive", + torch_opinfo_name="positive", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.real", + torch_opinfo_name="real", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.reciprocal", + torch_opinfo_name="reciprocal", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.round", + torch_opinfo_name="round", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + skips=( + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rsqrt", + torch_opinfo_name="rsqrt", + decorators=(precisionOverride({torch.half: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sigmoid", + torch_opinfo_name="sigmoid", + aliases=('_refs.special.expit',), + # Reference: https://github.com/pytorch/pytorch/issues/56012 + handles_complex_extremal_values=False, + handles_large_floats=False, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble]) + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sign", + torch_opinfo_name="sign", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sgn", + torch_opinfo_name="sgn", + # This is an issue with the vectorised abs on CPU + handles_complex_extremal_values=False, + handles_large_floats=False, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.signbit", + torch_opinfo_name="signbit", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sin", + torch_opinfo_name="sin", + 
decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinc", + torch_opinfo_name="sinc", + decorators=(precisionOverride({torch.bfloat16: 1e-2, + torch.float16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49133 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.cfloat]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinh", + torch_opinfo_name="sinh", + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + ), + ), + PythonRefInfo( + "_refs.softmax", + torch_opinfo_name="softmax", + torch_opinfo_variant_name="with_dtype", + ), + 
ElementwiseUnaryPythonRefInfo( + "_refs.sqrt", + torch_opinfo_name="sqrt", + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.bfloat16,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.square", + torch_opinfo_name="square", + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + skips=( + # AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)), + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tan", + torch_opinfo_name="tan", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tanh", + torch_opinfo_name="tanh", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.trunc", + torch_opinfo_name="trunc", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.special.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.special.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.logit", + torch_opinfo_name="logit", + ), + # + # Elementwise Unary nn.functional OpInfos + # + PythonRefInfo( + "_refs.nn.functional.alpha_dropout", + torch_opinfo_name="nn.functional.alpha_dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.celu", + torch_opinfo_name="nn.functional.celu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.threshold", + torch_opinfo_name="nn.functional.threshold", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.dropout", + torch_opinfo_name="nn.functional.dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # dropout is not comparable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + 
"_refs.nn.functional.elu", + torch_opinfo_name="nn.functional.elu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardtanh", + torch_opinfo_name="nn.functional.hardtanh", + supports_out=True, + ), + PythonRefInfo( # TODO: Port this to an UnaryOpInfo + "_refs.nn.functional.gelu", + torch_opinfo_name="nn.functional.gelu", + ), + PythonRefInfo( + "_refs.nn.functional.layer_norm", + torch_opinfo_name="nn.functional.layer_norm", + skips=( + # Reference result was farther (3.5762786809723224e-07) from the precise computation + # than the torch result was (2.5068410824946596e-07)! + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float32,), device_type='cpu'), + ), + ), + PythonRefInfo( + "_refs.nn.functional.glu", + torch_opinfo_name="nn.functional.glu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pairwise_distance", + torch_opinfo_name="nn.functional.pairwise_distance", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pdist", + torch_opinfo_name="nn.functional.pdist", + supports_out=True, + skips=( + # RunTimeError: no _refs support for torch.Tensor.index_select + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + )), + PythonRefInfo( + "_refs.nn.functional.leaky_relu", + torch_opinfo_name="nn.functional.leaky_relu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.pixel_shuffle", + torch_opinfo_name="nn.functional.pixel_shuffle", + ), + PythonRefInfo( + "_refs.nn.functional.pixel_unshuffle", + torch_opinfo_name="nn.functional.pixel_unshuffle", + ), + 
PythonRefInfo( + "_refs.nn.functional.poisson_nll_loss", + torch_opinfo_name="nn.functional.poisson_nll_loss", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.prelu", + torch_opinfo_name="nn.functional.prelu", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu", + torch_opinfo_name="nn.functional.relu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu6", + torch_opinfo_name="nn.functional.relu6", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.mish", + torch_opinfo_name="nn.functional.mish", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + 'TestUnaryUfuncs',), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.selu", + torch_opinfo_name="nn.functional.selu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + PythonRefInfo( + "_refs.nn.functional.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.softmin", + torch_opinfo_name="nn.functional.softmin", + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softplus", + torch_opinfo_name="nn.functional.softplus", + ), + PythonRefInfo( + "_refs.nn.functional.l1_loss", + torch_opinfo_name="nn.functional.l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.margin_ranking_loss", + torch_opinfo_name="nn.functional.margin_ranking_loss", + ), + PythonRefInfo( + "_refs.nn.functional.mse_loss", + torch_opinfo_name="nn.functional.mse_loss", + ), + PythonRefInfo( + "_refs.nn.functional.smooth_l1_loss", + torch_opinfo_name="nn.functional.smooth_l1_loss", + ), + PythonRefInfo( + 
"_refs.nn.functional.hinge_embedding_loss", + torch_opinfo_name="nn.functional.hinge_embedding_loss", + ), + PythonRefInfo( + "_refs.nn.functional.nll_loss", + torch_opinfo_name="nn.functional.nll_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + # For simpler indexing, we flatten target indices, then reshape the result tensor. + # This creates inconsistent view state with reference impl. + validate_view_consistency=False, + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda" + ), + ), + ), + PythonRefInfo( + "_refs.nn.functional.huber_loss", + torch_opinfo_name="nn.functional.huber_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.tanhshrink", + torch_opinfo_name="nn.functional.tanhshrink", + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02), + torch.complex64: tol(atol=6e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), + active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), + device_type='cpu', + 
active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardshrink", + torch_opinfo_name="nn.functional.hardshrink", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softshrink", + torch_opinfo_name="nn.functional.softshrink", + ), + # + # Elementwise Binary Reference OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.add", + torch_opinfo_name="add", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.atan2", + torch_opinfo_name="atan2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_and", + torch_opinfo_name="bitwise_and", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_left_shift", + torch_opinfo_name="bitwise_left_shift", + skips=( + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_right_shift", + torch_opinfo_name="bitwise_right_shift", + skips=( + # # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_or", + torch_opinfo_name="bitwise_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_xor", + torch_opinfo_name="bitwise_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.copysign", + torch_opinfo_name="copysign", + skips=( + # RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend 
(a), but it is found on cpu! + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="no_rounding_mode", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # NotImplementedError: argument of type: + DecorateInfo( + unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32, torch.complex64, torch.complex128,) + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="trunc_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="floor_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.eq", + torch_opinfo_name="eq", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.float_power", + torch_opinfo_name="float_power", + skips=( + # Test doesn't account for float -> double type promotion + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logaddexp", + torch_opinfo_name="logaddexp", + skips=( + # failure due to mismatch in edge cases, which boils 
down to what torch.exp(inf + infj) should be + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + PythonRefInfo( + "_refs.logaddexp2", + torch_opinfo_name="logaddexp2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.floor_divide", + torch_opinfo_name="floor_divide", + rhs_make_tensor_kwargs=dict(exclude_zero=True), + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + # bfloat16 floor_divide compared with a float32 reference works inconsistently + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,)), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. 
NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmax", + torch_opinfo_name="fmax", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmin", + torch_opinfo_name="fmin", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmod", + torch_opinfo_name="fmod", + rhs_make_tensor_kwargs={'exclude_zero': True}, + supports_rhs_python_scalar=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gcd", + torch_opinfo_name="gcd", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + ), + ), + 
ElementwiseBinaryPythonRefInfo( + "_refs.ge", + torch_opinfo_name="ge", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gt", + torch_opinfo_name="gt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.heaviside", + torch_opinfo_name="heaviside", + supports_rhs_python_scalar=False, + skips=( + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.hypot", + torch_opinfo_name="hypot", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igamma", + torch_opinfo_name="igamma", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igammac", + torch_opinfo_name="igammac", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.isclose", + torch_opinfo_name="isclose", + skips=( + # Intentional xfail -- isclose does not type promote + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lcm", + torch_opinfo_name="lcm", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.le", + torch_opinfo_name="le", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_and", + torch_opinfo_name="logical_and", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.logical_not", + torch_opinfo_name="logical_not", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_or", + torch_opinfo_name="logical_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_xor", + torch_opinfo_name="logical_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lt", + torch_opinfo_name="lt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.maximum", + torch_opinfo_name="maximum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.minimum", + torch_opinfo_name="minimum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.mul", + torch_opinfo_name="mul", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type='cuda' + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type='cuda' + ), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ne", + torch_opinfo_name="ne", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.nextafter", + torch_opinfo_name="nextafter", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.pow", + torch_opinfo_name="pow", + decorators=( + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Skipping integers because they are being raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.remainder", + torch_opinfo_name="remainder", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), 
device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.rsub", + torch_opinfo_name="rsub", + # https://github.com/pytorch/pytorch/issues/76944 + skips=( + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.chalf,), device_type='cpu'), + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.chalf,), device_type='cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.sub", + torch_opinfo_name="sub", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + 
dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.true_divide", + torch_opinfo_name="true_divide", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + # + # Elementwise Ternary Reference OpInfos + # + PythonRefInfo( + "_refs.addcdiv", + torch_opinfo_name="addcdiv", + ), + PythonRefInfo( + "_refs.addcmul", + torch_opinfo_name="addcmul", + skips=( + # Reference result was farther (1.3343989849090576e-05) + # from the precise computation than the torch result + # was (9.592622518539429e-06)! 
+ # FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float16,), device_type="cpu"), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.float16,), device_type="cpu"), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_min", + torch_opinfo_name="clamp_min", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_max", + torch_opinfo_name="clamp_max", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.clamp", + torch_opinfo_name="clamp", + ), + PythonRefInfo( + "_refs.nn.functional.triplet_margin_loss", + torch_opinfo_name="nn.functional.triplet_margin_loss", + supports_out=False, + # TODO: Uses minimum and clamp + skips=( + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed) + # Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed) + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8,), device_type="cpu"), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.xlogy", + torch_opinfo_name="xlogy", + supports_one_python_scalar=True, + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.xlog1py", + torch_opinfo_name="special.xlog1py", + supports_one_python_scalar=True, + ), + # + # Data Conversion & Data Movement Opinfos + # + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bfloat16", + torch_opinfo_name="bfloat16", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bool", + torch_opinfo_name="bool", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.byte", + torch_opinfo_name="byte", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.char", + torch_opinfo_name="char", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.complex", + torch_opinfo_name="complex", + error_inputs_func=partial(error_inputs_complex, is_ref=True), + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.polar", + torch_opinfo_name="polar", + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.double", + torch_opinfo_name="double", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.float", + torch_opinfo_name="float", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.half", + torch_opinfo_name="half", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.int", + torch_opinfo_name="int", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.long", + torch_opinfo_name="long", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.short", + torch_opinfo_name="short", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.chalf", + torch_opinfo_name="chalf", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cfloat", + torch_opinfo_name="cfloat", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cdouble", + torch_opinfo_name="cdouble", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.clone", + torch_opinfo_name="clone", + ), + # + # View & Shape OpInfos + # + PythonRefInfo( + "_refs.atleast_1d", + torch_opinfo_name="atleast_1d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_2d", + torch_opinfo_name="atleast_2d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_3d", + torch_opinfo_name="atleast_3d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + torch_opinfo_variant_name="partial_views", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when 
storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.as_strided_scatter", + torch_opinfo_name="as_strided_scatter", + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.block_diag", + torch_opinfo_name="block_diag", + ), + PythonRefInfo( + "_refs.broadcast_shapes", + torch_opinfo_name="broadcast_shapes", + ), + PythonRefInfo( + "_refs.broadcast_tensors", + torch_opinfo_name="broadcast_tensors", + ), + PythonRefInfo( + "_refs.broadcast_to", + torch_opinfo_name="broadcast_to", + ), + PythonRefInfo( + "_refs.cat", + torch_opinfo_name="cat", + skips=( + # FIXME: AssertionError: RuntimeError not raised + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.chunk", + torch_opinfo_name="chunk", + ), + PythonRefInfo( + "_refs.column_stack", + torch_opinfo_name="column_stack", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj", + torch_opinfo_name="conj", + ), + PythonRefInfo( + "_refs.constant_pad_nd", + torch_opinfo_name="constant_pad_nd", + ), + PythonRefInfo( + "_refs.contiguous", + torch_opinfo_name="contiguous", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.deg2rad", + torch_opinfo_name="deg2rad", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.dsplit", + torch_opinfo_name="dsplit", + ), + PythonRefInfo( + "_refs.diag", + torch_opinfo_name="diag", + ), + PythonRefInfo( + "_refs.diagonal", + torch_opinfo_name="diagonal", + ), + PythonRefInfo( + "_refs.diagonal_copy", + torch_opinfo_name="diagonal_copy", + ), + PythonRefInfo( + "_refs.diagonal_scatter", + torch_opinfo_name="diagonal_scatter", + supports_out=True, + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + 
"_refs.diag_embed", + torch_opinfo_name="diag_embed", + supports_out=True, + ), + PythonRefInfo( + "_refs.dstack", + torch_opinfo_name="dstack", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.expand", + torch_opinfo_name="expand", + ), + PythonRefInfo( + "_refs.expand_as", + torch_opinfo_name="expand_as", + ), + PythonRefInfo( + "_refs.flatten", + torch_opinfo_name="flatten", + ), + PythonRefInfo( + "_refs.flip", + torch_opinfo_name="flip", + ), + PythonRefInfo( + "_refs.fliplr", + torch_opinfo_name="fliplr", + ), + PythonRefInfo( + "_refs.flipud", + torch_opinfo_name="flipud", + ), + PythonRefInfo( + "_refs.hstack", + torch_opinfo_name="hstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.narrow", + torch_opinfo_name="narrow", + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True), + ), + PythonRefInfo( + "_refs.narrow_copy", + torch_opinfo_name="narrow_copy", + supports_out=True, + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True), + ), + PythonRefInfo( + "_refs.nn.functional.group_norm", + torch_opinfo_name="nn.functional.group_norm", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.native_layer_norm", + torch_opinfo_name="native_layer_norm", + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref", + device_type="cpu", dtypes=(torch.float32,)), + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback", + device_type="cpu", dtypes=(torch.float32,)), + ), + ), + PythonRefInfo( + "_refs.permute", + torch_opinfo_name="permute", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rad2deg", + torch_opinfo_name="rad2deg", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + 
PythonRefInfo( + "_refs.ravel", + torch_opinfo_name="ravel", + ), + PythonRefInfo( + "_refs.renorm", + torch_opinfo_name="renorm", + ), + PythonRefInfo( + "_refs.repeat", + torch_opinfo_name="repeat", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.reshape", + torch_opinfo_name="reshape", + ), + PythonRefInfo( + "_refs.reshape_as", + torch_opinfo_name="reshape_as", + ), + PythonRefInfo( + "_refs.roll", + torch_opinfo_name="roll", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.rot90", + torch_opinfo_name="rot90", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.select_scatter", + torch_opinfo_name="select_scatter", + ), + PythonRefInfo( + "_refs.stack", + torch_opinfo_name="stack", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + torch_opinfo_variant_name="multiple", + ), + PythonRefInfo( + "_refs.tensor_split", + torch_opinfo_name="tensor_split", + skips=( + # TensorMeta doesn't support tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # RuntimeError: no _refs support for torch.Tensor.tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.hsplit", + torch_opinfo_name="hsplit", + ), + PythonRefInfo( + "_refs.vsplit", + torch_opinfo_name="vsplit", + ), + PythonRefInfo( + "_refs.dot", + torch_opinfo_name="dot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.vdot", + torch_opinfo_name="vdot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # 
.conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.transpose", + torch_opinfo_name="transpose", + ), + PythonRefInfo( + "_refs.t", + torch_opinfo_name="t", + ), + PythonRefInfo( + "_refs.T", + torch_opinfo_name="T", + error_inputs_func=partial(error_inputs_T, has_ndims_error=True), + ), + PythonRefInfo( + "_refs.unfold", + torch_opinfo_name="unfold", + ), + PythonRefInfo( + "_refs.unfold_copy", + torch_opinfo_name="unfold_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.unsqueeze", + torch_opinfo_name="unsqueeze", + ), + PythonRefInfo( + "_refs.view", + torch_opinfo_name="view", + ), + PythonRefInfo( + "_refs.view_as", + torch_opinfo_name="view_as", + ), + PythonRefInfo( + "_refs.vstack", + torch_opinfo_name="vstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.unflatten", + torch_opinfo_name="unflatten", + ), + PythonRefInfo( + "_refs.unbind", + torch_opinfo_name="unbind", + ), + # + # Reduction Reference OpInfos + # + ReductionPythonRefInfo( + "_refs.all", + torch_opinfo_name="all", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.amax", + torch_opinfo_name="amax", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + 
"_refs.amin", + torch_opinfo_name="amin", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.any", + torch_opinfo_name="any", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.count_nonzero", + torch_opinfo_name="count_nonzero", + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_default_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_multi_unsorted_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionPythonRefInfo( + "_refs.mean", + torch_opinfo_name="mean", + supports_out=True, + error_inputs_func=partial(error_inputs_mean, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.std", + torch_opinfo_name="std", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + 
unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + # std_mean and var_mean are not ReductionInfos + PythonRefInfo( + "_refs.std_mean", + torch_opinfo_name="std_mean", + ), + ReductionPythonRefInfo( + "_refs.sum", + torch_opinfo_name="sum", + supports_out=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + PythonRefInfo( + "_refs.cumsum", + torch_opinfo_name="cumsum", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.cumprod", + torch_opinfo_name="cumprod", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.sum_to_size", + torch_opinfo_name="sum_to_size", + validate_view_consistency=False, + ), + ReductionPythonRefInfo( + 
"_refs.prod", + torch_opinfo_name="prod", + supports_out=True, + supports_multiple_dims=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + ), + ), + ReductionPythonRefInfo( + "_refs.var", + torch_opinfo_name="var", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + ), + ), + PythonRefInfo( + "_refs.var_mean", + torch_opinfo_name="var_mean", + validate_view_consistency=False, + ), + # + # Linear Algebra Operators + # + PythonRefInfo( + "_refs.addr", + torch_opinfo_name="addr", + decorators=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), + ), + ), + PythonRefInfo( + "_refs.trace", + torch_opinfo_name="trace", + ), + PythonRefInfo( + "_refs.norm", + torch_opinfo_name="norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + ), + # + # Tensor Creation Reference OpInfos + # + PythonRefInfo( + "_refs.empty", + torch_opinfo_name="empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 
'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: shouldn't check empty results + DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.empty_like", + torch_opinfo_name="empty_like", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.randn", + torch_opinfo_name="randn", + op=lambda *args, 
**kwargs: wrapper_set_seed(refs.randn, *args, **kwargs), + skips=( + # see https://github.com/pytorch/pytorch/issues/85121 + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), + 'TestCommon', + 'test_python_ref_executor'), + # These tests expect the input to be a tensor or a sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.eye", + torch_opinfo_name="eye", + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + ), + ), + PythonRefInfo( + "_refs.new_empty", + torch_opinfo_name="new_empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't 
check result for new_empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_empty_strided", + torch_opinfo_name="new_empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + ), + ), + PythonRefInfo( + "_refs.empty_strided", + torch_opinfo_name="empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + 
PythonRefInfo( + "_refs.new_full", + torch_opinfo_name="new_full", + ), + PythonRefInfo( + "_refs.new_ones", + torch_opinfo_name="new_ones", + ), + PythonRefInfo( + "_refs.new_zeros", + torch_opinfo_name="new_zeros", + ), + # + # Conditional Reference OpInfos + # + PythonRefInfo( + "_refs.masked_fill", + torch_opinfo_name="masked_fill", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.where", + torch_opinfo_name="where", + op=lambda self, condition, other: refs.where(condition, self, other), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'), + ), + ), + PythonRefInfo( + "_refs.index_select", + torch_opinfo_name="index_select", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + # Sample out= with a stride of zero. This _out operation checks that the input has no + # inner overlap + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),) + ), + PythonRefInfo( + "_refs.index_copy", + torch_opinfo_name="index_copy", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.index_add", + torch_opinfo_name="index_add", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.index_fill", + torch_opinfo_name="index_fill", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),) + ), + # + # Test-related functions + # + PythonRefInfo( + "_refs.allclose", + 
torch_opinfo_name="allclose", + ), + # + # Misc functions + # + PythonRefInfo( + "_refs.stft", + torch_opinfo_name="stft", + skips=[ + # RuntimeError: no _refs support for aten.pad + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.istft", + torch_opinfo_name="istft", + skips=[ + # RuntimeError: no _refs support for aten.unfold_backward + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.view_as_complex", + torch_opinfo_name="view_as_complex", + ), +] +python_ref_db += opinfo.definitions.python_ref_db + +# Common operator groupings +ops_and_refs = op_db + python_ref_db +unary_ufuncs = [op for op in ops_and_refs if isinstance(op, UnaryUfuncInfo)] +binary_ufuncs = [op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)] +binary_ufuncs_and_refs = tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) +spectral_funcs = [op for op in ops_and_refs if isinstance(op, SpectralFuncInfo)] +sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse] +sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr] +sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse] +shape_funcs = [op for op in ops_and_refs if isinstance(op, ShapeFuncInfo)] +reduction_ops = [op for op in ops_and_refs if isinstance(op, ReductionOpInfo)] +reference_filtered_ops = [op for op in reduction_ops if op.ref is not None] +reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('masked.')] +sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('masked.')] + +# TODO: review porting these to make_tensor +def index_variable(shape, max_indices, device=torch.device('cpu')): + if not isinstance(shape, tuple): + shape = (shape,) + index = torch.rand(*shape, 
dtype=torch.double, device=device).mul_(max_indices).floor_().long() + return index + +def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')): + assert len(shape) == 2 + assert index_dim < 2 + batch_dim = 1 - index_dim + index = torch.zeros(*shape, dtype=torch.long, device=device) + for i in range(shape[index_dim]): + index.select(index_dim, i).copy_( + torch.randperm(max_indices, device=device)[:shape[batch_dim]]) + if duplicate: + index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) + return index + +def bernoulli_scalar(): + return torch.tensor(0, dtype=torch.bool).bernoulli_() + +def mask_not_all_zeros(shape): + assert len(shape) > 0 + while True: + result = torch.randn(shape).gt(0) + if result.sum() > 0: + return result + +# Copied from functorch +def xfail(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, True) + + +def skip(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, False) + + +def skipOps(test_case_name, base_test_name, to_skip): + all_opinfos = op_db + for xfail in to_skip: + op_name, variant_name, device_type, dtypes, expected_failure = xfail + matching_opinfos = [o for o in all_opinfos + if o.name == op_name and o.variant_test_name == variant_name] + assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" + for op in matching_opinfos: + decorators = list(op.decorators) + if expected_failure: + decorator = DecorateInfo(unittest.expectedFailure, + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + else: + decorator = DecorateInfo(unittest.skip("Skipped!"), + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + op.decorators = tuple(decorators) + + # This decorator doesn't modify fn in any way + def wrapped(fn): + return fn + return wrapped diff --git 
a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9d01cf9cde74e5252c8e796659a46af93de1f6 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py @@ -0,0 +1,78 @@ +# mypy: ignore-errors + +import contextlib +import functools +import inspect + +import torch + + +# Test whether hardware BF32 math mode enabled. It is enabled only on: +# - MKLDNN is available +# - BF16 is supported by MKLDNN +def bf32_is_not_fp32(): + if not torch.backends.mkldnn.is_available(): + return False + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + return False + return True + + +@contextlib.contextmanager +def bf32_off(): + old_matmul_precision = torch.get_float32_matmul_precision() + try: + torch.set_float32_matmul_precision("highest") + yield + finally: + torch.set_float32_matmul_precision(old_matmul_precision) + + +@contextlib.contextmanager +def bf32_on(self, bf32_precision=1e-5): + old_matmul_precision = torch.get_float32_matmul_precision() + old_precision = self.precision + try: + torch.set_float32_matmul_precision("medium") + self.precision = bf32_precision + yield + finally: + torch.set_float32_matmul_precision(old_matmul_precision) + self.precision = old_precision + + +# This is a wrapper that wraps a test to run this test twice, one with +# allow_bf32=True, another with allow_bf32=False. 
When running with +# allow_bf32=True, it will use reduced precision as specified by the +# argument +def bf32_on_and_off(bf32_precision=1e-5): + def with_bf32_disabled(self, function_call): + with bf32_off(): + function_call() + + def with_bf32_enabled(self, function_call): + with bf32_on(self, bf32_precision): + function_call() + + def wrapper(f): + params = inspect.signature(f).parameters + arg_names = tuple(params.keys()) + + @functools.wraps(f) + def wrapped(*args, **kwargs): + for k, v in zip(arg_names, args): + kwargs[k] = v + cond = bf32_is_not_fp32() + if "device" in kwargs: + cond = cond and (torch.device(kwargs["device"]).type == "cpu") + if "dtype" in kwargs: + cond = cond and (kwargs["dtype"] == torch.float) + if cond: + with_bf32_disabled(kwargs["self"], lambda: f(**kwargs)) + with_bf32_enabled(kwargs["self"], lambda: f(**kwargs)) + else: + f(**kwargs) + + return wrapped + + return wrapper diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..5e7e3739695d026b0359dc04b3d8595291af109d --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py @@ -0,0 +1,4445 @@ +# mypy: ignore-errors + +import torch +import unittest +from copy import deepcopy +from enum import Enum +from functools import wraps, partial +from itertools import chain, product +import itertools +import math +import torch.nn.functional as F +from torch.nn.utils.rnn import pack_padded_sequence +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import TEST_CUDNN +from torch.testing._internal.common_dtype import ( + floating_types, floating_and_complex_types_and, get_all_fp_dtypes) +from torch.testing._internal.common_device_type import ( + _TestParametrizer, _update_param_kwargs, toleranceOverride, tol, + skipCUDAIfCudnnVersionLessThan, 
skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS, skipCUDAVersionIn) +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_nn import ( + cosineembeddingloss_reference, cross_entropy_loss_reference, ctcloss_reference, + hingeembeddingloss_reference, huberloss_reference, kldivloss_reference, + marginrankingloss_reference, multimarginloss_reference, multilabelmarginloss_reference, + nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference, get_reduction) +from torch.testing._internal.common_utils import ( + freeze_rng_state, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS, + skipIfTorchDynamo) +from types import ModuleType +from typing import List, Tuple, Type, Set, Dict +import operator + +# List of all namespaces containing modules to test. +MODULE_NAMESPACES: List[ModuleType] = [ + torch.nn.modules, + torch.ao.nn.qat.modules, + torch.ao.nn.quantizable.modules, + torch.ao.nn.quantized.modules, + torch.ao.nn.quantized.modules, +] + +# Modules that shouldn't be tested for one reason or another. +MODULES_TO_SKIP: Set[Type] = { + torch.nn.Module, # abstract base class + torch.nn.Container, # deprecated + torch.nn.NLLLoss2d, # deprecated + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d +} + +# List of all module classes to test. +MODULE_CLASSES: List[Type] = list(chain(*[ + [getattr(namespace, module_name) for module_name in namespace.__all__] # type: ignore[attr-defined] + for namespace in MODULE_NAMESPACES])) +MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP] + +# Dict of module class -> common name. Useful for making test names more intuitive. 
+# Example: torch.nn.modules.linear.Linear -> "nn.Linear" +MODULE_CLASS_NAMES: Dict[Type, str] = {} +for namespace in MODULE_NAMESPACES: + for module_name in namespace.__all__: # type: ignore[attr-defined] + module_cls = getattr(namespace, module_name) + namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '') + + # Deal with any aliases by preferring earlier names. + if module_cls not in MODULE_CLASS_NAMES: + MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}' + + +# Specifies the modes (i.e. train, eval) to test over. +TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval')) + + +class modules(_TestParametrizer): + """ PROTOTYPE: Decorator for specifying a list of modules over which to run a test. """ + + def __init__(self, module_info_iterable, allowed_dtypes=None, + train_eval_mode=TrainEvalMode.train_and_eval, skip_if_dynamo=True): + self.module_info_list = list(module_info_iterable) + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + self.train_eval_mode = train_eval_mode + self.skip_if_dynamo = skip_if_dynamo + + def _get_training_flags(self, module_info): + training_flags = [] + if (self.train_eval_mode == TrainEvalMode.train_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(True) + + if (self.train_eval_mode == TrainEvalMode.eval_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(False) + + # If train and eval modes don't differ for the module, don't bother using more than one. 
+ if not module_info.train_and_eval_differ: + training_flags = training_flags[:1] + + return training_flags + + def _parametrize_test(self, test, generic_cls, device_cls): + if device_cls is None: + raise RuntimeError('The @modules decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + for module_info in self.module_info_list: + dtypes = set(module_info.supported_dtypes(device_cls.device_type)) + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + training_flags = self._get_training_flags(module_info) + for (training, dtype) in product(training_flags, dtypes): + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = module_info.formatted_name + if len(training_flags) > 1: + test_name += f"_{'train_mode' if training else 'eval_mode'}" + + # Construct parameter kwargs to pass to the test. 
+ param_kwargs = {'module_info': module_info} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + _update_param_kwargs(param_kwargs, 'training', training) + + try: + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR: + test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper) + + decorator_fn = partial(module_info.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for module {module_info.name}!") + raise ex + + +def get_module_common_name(module_cls): + if module_cls in MODULE_CLASS_NAMES: + # Example: "nn.Linear" + return MODULE_CLASS_NAMES[module_cls] + else: + return module_cls.__name__ + + +class FunctionInput: + """ Contains args and kwargs to pass as input to a function. """ + __slots__ = ['args', 'kwargs'] + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +class ModuleInput: + """ Contains args / kwargs for module instantiation + forward pass. """ + __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn'] + + def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None): + self.constructor_input = constructor_input # Inputs to pass during construction + self.forward_input = forward_input # Inputs to pass to forward() + self.desc = desc # Description for this set of inputs + self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs) + + if reference_fn is not None: + + @wraps(reference_fn) + def copy_reference_fn(m, *args, **kwargs): + # Copy inputs to avoid undesired side effects from calling the reference. 
+ args, kwargs = deepcopy(args), deepcopy(kwargs) + + # Note that module parameters are passed in for convenience. + return reference_fn(m, list(m.parameters()), *args, **kwargs) + + self.reference_fn = copy_reference_fn + +class ModuleErrorEnum(Enum): + """ Enumerates when error is raised when testing modules. """ + CONSTRUCTION_ERROR = 0 + FORWARD_ERROR = 1 + +class ErrorModuleInput: + """ + A ModuleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["module_error_input", "error_on", "error_type", "error_regex"] + + def __init__(self, + module_error_input, + *, + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=RuntimeError, + error_regex): + self.module_error_input = module_error_input + self.error_on = error_on + self.error_type = error_type + self.error_regex = error_regex + + +class ModuleInfo: + """ Module information to be used in testing. """ + + def __init__(self, + module_cls, # Class object for the module under test + *, + module_inputs_func, # Function to generate module inputs + skips=(), # Indicates which tests to skip + decorators=None, # Additional decorators to apply to generated tests + dtypes=floating_types(), # dtypes this function is expected to work with + dtypesIfMPS=(torch.float16, torch.float32,), # dtypes this function is expected to work with on MPS + supports_gradgrad=True, # whether the op supports second order gradients + gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck + module_memformat_affects_out=False, # whether converting module to channels last will generate + # channels last output + train_and_eval_differ=False, # whether the module has differing behavior between train and eval + module_error_inputs_func=None, # Function to generate module inputs that error + ): + self.module_cls = module_cls + self.module_inputs_func = module_inputs_func + self.decorators = (*(decorators if decorators else []), *(skips if skips 
else [])) + self.dtypes = dtypes + self.dtypesIfMPS = dtypesIfMPS + self.supports_gradgrad = supports_gradgrad + self.gradcheck_nondet_tol = gradcheck_nondet_tol + self.module_memformat_affects_out = module_memformat_affects_out + self.train_and_eval_differ = train_and_eval_differ + self.module_error_inputs_func = module_error_inputs_func + self.is_lazy = issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active(test_class, test_name, device, dtype, param_kwargs): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + if device_type == 'mps': + return self.dtypesIfMPS + else: + return self.dtypes + + @property + def name(self): + return get_module_common_name(self.module_cls) + + @property + def formatted_name(self): + return self.name.replace('.', '_') + +# Start of module inputs functions. 
+ +def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(10, 8), + forward_input=FunctionInput(input=make_input((4, 10))), + reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)), + ModuleInput(constructor_input=FunctionInput(10, 8, bias=False), + forward_input=FunctionInput(make_input((4, 10))), + desc='no_bias', + reference_fn=lambda m, p, i: torch.mm(i, p[0].t())), + ModuleInput(constructor_input=FunctionInput(3, 5), + forward_input=FunctionInput(make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1]) + ] + + return module_inputs + + +def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def bilinear_reference_fn(m, p, x1, x2, bias=True): + result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2) + if bias: + if x1.shape[0] == 1: + result = result.view(-1) + p[1] + else: + result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0]) + return result + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + reference_fn=bilinear_reference_fn), + ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + desc='no_bias', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)), + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input(2), make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))), + ] + + 
return module_inputs + + +def module_inputs_torch_nn_KLDivLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_batchmean', {'reduction': 'batchmean'}), + ('reduction_none', {'reduction': 'none'}), + ('log_target', {'log_target': True}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return kldivloss_reference(i, t, **constructor_kwargs) + + input = make_input((10, 10)).log() + target = make_input((10, 10)) if kwargs.get('log_target', False) else make_input((10, 10)).log() + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(input, target), + desc=desc, + reference_fn=reference_fn) + ) + + scalar_input = make_input(()).log() + scalar_target = make_input(()) if kwargs.get('log_target', False) else make_input(()).log() + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(scalar_input, scalar_input), + desc='scalar_' + desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad): + return make_tensor(shape, device=device, dtype=dtype, + requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_none', {'reduction': 'none'}), + ('ignore_index', {'ignore_index': 2}), + ('weights', {'weight': make_weight(4).abs()}), + ('weights_ignore_index', 
{'weight': make_weight(4).abs(), 'ignore_index': 2}), + ('weights_ignore_index_neg', {'weight': make_weight(4).abs(), 'ignore_index': -1}) + ] + + # TODO: Uncomment when negative weights is supported. + # negative_weight = make_weight(10) + # negative_weight[0] = -1 + # cases.append(('weights_negative', {'weight': negative_weight})) + module_inputs = [] + for desc, constructor_kwargs in cases: + + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return nllloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 4)), + torch.empty(15, device=device).uniform_().mul(4).floor().long()), + desc=desc, + reference_fn=reference_fn) + ) + + def nd_reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return nlllossNd_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5, 5)), + torch.empty(2, 5, 5, device=device).uniform_().mul(4).floor().long()), + desc=f"nd_{desc}", + reference_fn=nd_reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5, 5, 2, 2)), + torch.empty(2, 5, 5, 2, 2, device=device).uniform_().mul(4).floor().long()), + desc=f"higher_dim_{desc}", + reference_fn=nd_reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5)), + torch.empty(2, 5, device=device).uniform_().mul(4).floor().long()), + desc=f"3d_{desc}", + reference_fn=nd_reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(3), + make_target(3), + make_input(1).abs()), + desc=desc, + reference_fn=no_batch_dim_reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_PoissonNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('full', {'full': True}), + ('no_log_input', {'log_input': False}), + ('full_no_log_input', {'full': True, 'log_input': False}), + ] + + def poissonnllloss_reference_fn(i, t, log_input=True, full=False, reduction='mean', eps=1e-8): + if log_input: + result = i.exp() - t.mul(i) + else: + result = i - t.mul((i + eps).log()) + + if full: + result += (t.mul(t.log()) - t + 0.5 * (2. 
* math.pi * t).log()).masked_fill(t <= 1, 0) + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return poissonnllloss_reference_fn(i, t, **constructor_kwargs) + + log_input = constructor_kwargs.get('log_input', True) + input = make_input((2, 3, 4, 5)) if log_input else make_input((2, 3, 4, 5)).abs().add(0.001) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(input, + make_target((2, 3, 4, 5)).floor_().abs_()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MSELoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + def mse_loss_reference_fn(m, p, i, t, reduction='mean'): + if reduction == 'none': + return (i - t).pow(2) + elif reduction == 'mean': + return (i - t).pow(2).sum() / i.numel() + else: + return (i - t).pow(2).sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 4, 5)), + make_target((2, 3, 4, 5))), + desc=desc, + reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_target(())), + desc=f'{desc}_scalar', + 
reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def no_batch_dim_reference_fn(m, p, *args, **kwargs): + """Reference function for modules supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + + Currently it only supports modules which return a single Tensor as output. + You can bind the following kwargs. + Kwargs: + batch_first[bool] : If True, all the Tensors in `args` while be unsqueezed at dim `0` . + and output will be squeezed at dim `0` else dim `1` for both. + kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and dimension to unsqueeze. + Useful if there are few arguments whose batch dimension are different + from the ones selected by `batch_first`. + is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly. + """ + def get_and_pop(key, default): + v = kwargs.get(key, default) + if key in kwargs: + kwargs.pop(key) + return v + + batch_dim = 0 if get_and_pop('batch_first', True) else 1 + kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None) + is_criterion = get_and_pop('is_criterion', False) + + if kwargs_to_batchify is not None: + assert isinstance(kwargs_to_batchify, dict) + for k, v in kwargs.items(): + if k in kwargs_to_batchify and v is not None: + bdim = kwargs_to_batchify[k] + kwargs[k] = v.unsqueeze(bdim) + + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim) + + if is_criterion: + reduction = get_reduction(m) + if reduction == 'none': + return output.squeeze(0) + return output + + +def no_batch_dim_reference_mha(m, p, *args, **kwargs): + """Reference function for MultiheadAttention supporting no batch dimensions. 
+ + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + batch_dim = 0 if kwargs.get('batch_first', True) else 1 + if 'batch_first' in kwargs: + kwargs.pop('batch_first') + if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None: + kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0) + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(0)) + + +def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs): + """Reference function for RNN and GRU supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = h.unsqueeze(1) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(1)) + + +def no_batch_dim_reference_lstm(m, p, *args, **kwargs): + """Reference function for LSTM supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. 
+ """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = (h[0].unsqueeze(1), h[1].unsqueeze(1)) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1))) + + +def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs): + """Reference function for LSTMCell supporting no batch dimensions. + + The module is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. + """ + inp, (h, c) = args + single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0))) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(0), output[1].squeeze(0)) + + +def generate_regression_criterion_inputs(make_input): + return [ + ModuleInput( + constructor_input=FunctionInput(reduction=reduction), + forward_input=FunctionInput(make_input((4, )), make_input(4,)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True), + desc=f'no_batch_dim_{reduction}' + ) for reduction in ['none', 'mean', 'sum']] + + +def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(kernel_size=2), + forward_input=FunctionInput(make_input((3, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((2, 3, 6)))), + ModuleInput(constructor_input=FunctionInput((2,), (2,)), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride'), + 
ModuleInput(constructor_input=FunctionInput(2, 2, 1), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride_pad')] + + +def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((3, 6, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride_pad')] + + + +def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((3, 4, 4, 4))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)), + 
forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride_pad_gpu_input_nooverlap'), + ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='divisor_stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, 
(1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride_pad_gpu_input_nooverlap')] + + + +def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='one_output')] + + +def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single_1x1output'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + +def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + 
forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 2, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((None, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput((3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))), + desc='last_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + + +def 
module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))), + desc='single_nonatomic'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))), + desc='tuple_nonatomic')] + + +def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(10,), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine'), + ModuleInput(constructor_input=FunctionInput(5,), + forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, None), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine_simple_average'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_tracking_stats'), + 
ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input_not_affine'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 9))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='2d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='momentum'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='3d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='momentum'), + 
ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs): + N = kwargs['N'] + lazy = kwargs.get('lazy', False) + transposed = kwargs.get('transposed', False) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}] + kernel_size, C_in, C_out = 3, 4, 5 + input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N)) + input_batch_shape = (2,) + input_no_batch_shape + return [ + ModuleInput(constructor_input=(FunctionInput(C_out, kernel_size, **conv_kwargs) if lazy else + FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)), + forward_input=FunctionInput(make_input( + input_batch_shape if with_batch else input_no_batch_shape)), + desc=('' if with_batch else 'no_batch_dim'), + reference_fn=(None if with_batch else no_batch_dim_reference_fn)) + for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list) + ] + + +def module_inputs_torch_nn_CosineEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.7}) + ] + + module_inputs = [] + for 
desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return cosineembeddingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10)), make_input((15, 10)), + make_target((15,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input')] + + +def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. 
* ((.5 * i).exp() - 1)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((5, 6, 7))), + desc='dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((4,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + 
reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='with_negval'), + ModuleInput(constructor_input=FunctionInput(0.0), + forward_input=FunctionInput(make_input((10, 10))), + desc='with_zero_negval'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input(())), + desc='with_negval_scalar')] + + 
+def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d_multiparam')] + + +def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + 
ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar')] + + +def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x))] + + +def module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20))), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(), + desc='multiparam'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(), + desc='multiparam_scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 5, 10))), + desc='multidim'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + 
ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((3, 4, 10))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.log(1 + torch.exp(i))), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: 1. / 2. * torch.log(1 + torch.exp(2 * i)), + desc='beta'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input(())), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. 
* torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='lambda'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input(())), + desc='lambda_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + 
ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + + +def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='threshold_value'), + ModuleInput(constructor_input=FunctionInput(2., 10.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='large_value'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(())), + desc='threshold_value_scalar'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, 
i: i * torch.tanh(F.softplus(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4)), + make_input((2, 3, 4))), + reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum() + for a, b in zip(i, t))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(()), make_input(())), + reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(), + desc='scalar')] + generate_regression_criterion_inputs(make_input) + + +def module_inputs_torch_nn_SmoothL1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return smoothl1loss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + 
forward_input=FunctionInput(make_input(()), + make_input(())), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + + +def module_inputs_torch_nn_BCELoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ] + + def bce_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = -(t * i.log() + (1 - t) * (1 - i).log()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_loss_reference_fn, **constructor_kwargs)) + ) + + scalar_weight = make_weight(()) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(weight=scalar_weight), + forward_input=FunctionInput(make_input((), low=1e-2, high=1 - 1e-2), + make_target(()).gt(0).to(dtype)), + desc='scalar_weight', + reference_fn=partial(bce_loss_reference_fn, weight=scalar_weight)) + ) + + return module_inputs + + +def module_inputs_torch_nn_BCEWithLogitsLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + 
make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ('scalar_weights', {'weight': make_weight(())}) + ] + + def bce_withlogitsloss_reference_fn(m, p, i, t, reduction='mean', weight=None): + # TODO: add pos_weight to the definition here and corresponding SampleInputs + max_val = (-i).clamp(min=0) + result = (1 - t).mul_(i).add_(max_val).add_((-max_val).exp_().add_((-i - max_val).exp_()).log_()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_withlogitsloss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + reductions: List[str] = ['mean', 'sum', 'none'] + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('weights', {'weight': make_weight((3,))}), + ('ignore_index', {'ignore_index': 1}), + ('label_smoothing', {'label_smoothing': 0.15}), + ('ignore_index_label_smoothing', 
{'ignore_index': 1, 'label_smoothing': 0.15}) + ] + + module_inputs = [] + for reduction, (desc, constructor_kwargs) in product(reductions, cases): + def reference_fn(m, p, i, t, reduction=reduction, constructor_kwargs=constructor_kwargs): + return cross_entropy_loss_reference(i, t, reduction=reduction, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5)), + make_target((2, 5, 5), low=0, high=3)), + desc=f"4d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5)), + make_target((2, 5), low=0, high=3)), + desc=f"3d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3)), + make_target((2), low=0, high=3)), + desc=f"2d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_target((2, 5, 5, 2, 2), low=0, high=3)), + desc=f"higher_dim_{desc}_{reduction}", + reference_fn=reference_fn) + ) + + if constructor_kwargs.get('ignore_index', None) is None: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4, 2)), + make_input((5, 3, 4, 2)).softmax(dim=1)), + desc=f"4d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4)), + make_input((5, 3, 
4)).softmax(dim=1)), + desc=f"3d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3)), + make_input((5, 3)).softmax(dim=1)), + desc=f"2d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_input((2, 3, 5, 5, 2, 2)).softmax(dim=1)), + desc=f"higher_dim_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((3,)), + make_target((), low=0, high=3)), + desc=f"no_batch_dim_{desc}_{reduction}", + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + + return module_inputs + + + +def module_inputs_torch_nn_CTCLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('blank', {'blank': 14}) + ] + target_dtypes = [torch.int, torch.long] + + module_inputs = [] + for target_dtype, (desc, constructor_kwargs) in product(target_dtypes, cases): + def reference_fn(m, p, i, t, il, tl, constructor_kwargs=constructor_kwargs): + return ctcloss_reference(i, t, il, tl, **constructor_kwargs) + + blank = constructor_kwargs.get('blank', 0) + low = 0 if blank == 14 else 1 + high = 14 if blank == 14 else 15 + + module_inputs.append( + ModuleInput( + 
constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_lengths_tensors', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_1d_target_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_1d_target_lengths_tensors', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 5))), + desc='1d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 12, 1e-3), + forward_input=FunctionInput(make_input((4, 12))), + desc='1d_affine_GN'), + ModuleInput( + constructor_input=FunctionInput(1, 6, 
1e-3), + forward_input=FunctionInput(make_input((150, 6))), + desc='1d_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput(5, 5, 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 10, 1e-3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='1d_no_affine_LN'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 2, 3))), + desc='2d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 28, 28))), + desc='2d_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 51, 1e-5, False), + forward_input=FunctionInput(make_input((2, 51, 28, 28))), + desc='2d_no_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_LN'), + ] + + +def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input((4, 3, 2, 4))), + ), + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input(())), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return 
[ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input') + ] + + +def module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_HingeEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return hingeembeddingloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10,)).gt(0).to(dtype).mul_(2).sub_(1)), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + 
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_target(()).gt(0).to(dtype).mul_(2).sub_(1)), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_HuberLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return huberloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lazy = kwargs.get('lazy', False) + N = kwargs['N'] + num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True + input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)} + input_no_batch_shape = input_no_batch_shape_dict[N] + input_batch_shape = (4,) + input_no_batch_shape + + return [ + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_batch_shape))), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + 
forward_input=FunctionInput(make_input(input_batch_shape)), + desc='tracking_stats'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='tracking_stats_no_batch_dim'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim') + ] + +def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + 
desc='3d_elementwise_affine_no_bias'), + ] + +def module_inputs_torch_nn_RMSNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def rms_norm_reference_fn(m, p, i): + eps = m.eps + if eps is None: + eps = torch.finfo(i.dtype).eps + ndim = i.ndim + normalized_shape = m.normalized_shape + weight = m.weight + dims = [ndim - i - 1 for i in range(len(normalized_shape))] + result = i * torch.rsqrt(i.pow(2).mean(dim=dims, keepdim=True) + m.eps) + if weight is not None: + result *= weight + return result + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ] + + +def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 5, 7))), + desc='1d'), + ModuleInput( + constructor_input=FunctionInput(2,), + forward_input=FunctionInput(make_input((1, 5, 7, 7))), + desc='2d_uneven_pad'), + ModuleInput( + constructor_input=FunctionInput(1, 1., 0.5, 2.), + forward_input=FunctionInput(make_input((1, 5, 7, 7, 7))), + desc='3d_custom_params'), + ] + + +def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7))), + desc='norm'), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((1, 3, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((3, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ] + + + +def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_LPPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 
2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput(4, 4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(4, return_indices=True), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((3, 7, 7))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='4d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='return_indices'), + ] + +def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((2, 2, 2)), + 
forward_input=FunctionInput(make_input((2, 3, 5, 5, 5)))), + ModuleInput( + constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_padding'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1), return_indices=True), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 7, 6))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 5, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 7, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def 
module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))), + desc='asymsize'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 5, 5, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar' + ), + ModuleInput( + 
constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format' + ) + ] + + +def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.sigmoid().log(), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: i.sigmoid().log(), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ] + + +def module_inputs_torch_nn_MarginRankingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return marginrankingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + 
forward_input=FunctionInput(make_input((50,)), make_input((50,)), + make_target((50,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multilabelmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10), low=0, high=10)), + desc=f'1d_{desc}', + reference_fn=reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('p', {'p': 2}), + ('margin', {'margin': 0.5}), + ('weights', {'weight': 
make_weight(10)}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multimarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelSoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weight', {'weight': make_weight(10)}), + ] + + def multilabelsoftmargin_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log() + if weight is not None: + result *= weight + result = (-result).sum(i.dim() - 1) / i.size(-1) + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.mean() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=2)), + desc=desc, + reference_fn=partial(multilabelsoftmargin_loss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_SoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, 
device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return softmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 5)), + make_target((5, 5)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs): + # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same. + samples = [] + for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer( + None, device, dtype, requires_grad, training): + # Construct a TransformerEncoderLayer object to pass to TransformerEncoder. + l_args, l_kwargs = (layer_module_input.constructor_input.args, + layer_module_input.constructor_input.kwargs) + l_kwargs['device'] = device + l_kwargs['dtype'] = dtype + encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs) + num_layers = 2 + # Note: TransformerEncoderLayer takes a "src_mask" while + # TransformerEncoder takes a "mask"; rename kwarg appropriately. 
+ forward_input = layer_module_input.forward_input + if 'src_mask' in forward_input.kwargs: + forward_input.kwargs['mask'] = forward_input.kwargs['src_mask'] + del forward_input.kwargs['src_mask'] + samples.append(ModuleInput( + constructor_input=FunctionInput(encoder_layer, num_layers), + forward_input=forward_input, + desc=layer_module_input.desc + )) + return samples + +def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='no_bias' + ),] + + # Samples below are for validating the no-batch-dim support. 
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for src_mask, src_key_padding_mask, norm_first, batch_first, bias in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, kwargs_to_batchify={'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + # Samples below where we pass reference_fn are for validating the fast path, + # since the fast path requires no_grad mode, we run the fast path in .eval() + # and no_grad() in the reference_fn and verify that against the results in train mode. 
+ def fast_path_reference_fn(module, parameters, *args, **kwargs): + assert module.training + module.train(False) + with torch.no_grad(): + output = module(*args, **kwargs) + module.train(True) + return output + + if training: + for norm_first, bias in itertools.product((True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput( + 4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias + ), + forward_input=FunctionInput( + make_input((2, 3, 4)), + ), + # fastpath doesn't run when bias=False + reference_fn=fast_path_reference_fn if bias else None, + desc=f'fastpath_{bias}_norm_first_{norm_first}' + ) + ) + + return samples + + +def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='no_bias' + ), ] + + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for tgt_mask, tgt_key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + memory_mask = tgt_mask + memory_key_padding_mask = tgt_key_padding_mask + samples.append( + ModuleInput( + 
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src, tgt = src.transpose(0, 1), tgt.transpose(0, 1) + if tgt_key_padding_mask is not None: + memory_key_padding_mask, tgt_key_padding_mask = (tgt_key_padding_mask.expand(2, 3),) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + desc=f'norm_first_{norm_first}_batch_first_{batch_first}_bias_{bias}' + )) + + return samples + + +def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + # Samples below are for validating the no-batch-dim support. 
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for mask, key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + src_mask , tgt_mask = (mask,) * 2 + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src = src.transpose(0, 1) + tgt = tgt.transpose(0, 1) + if key_padding_mask is not None: + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask.expand(2, 3),) * 2 + + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + )) + return samples + + +def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs): + make_empty = partial(torch.empty, device=device, 
dtype=torch.long, requires_grad=False) + return [ + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(2, 3).random_(4)) + ), + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)), + desc='discontiguous' + ), + ] + + +def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + bool_vals = (True, False) + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3))) + products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks) + for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products: + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=no_batch_dim_reference_mha, + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=partial(no_batch_dim_reference_mha, batch_first=False), + ) + ) + + return samples + + +def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, 
training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ] + + is_rnn = kwargs.get('is_rnn', False) + if is_rnn: + # RNN also supports `nonlinearity` argument. + # `tanh` is the default, so we check with `relu` + samples.append( + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = ( + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ) + + return samples + +def make_packed_sequence(inp, batch_sizes): + required_grad = inp.requires_grad + inp.requires_grad_(False) # user won't have access to inp so won't be able to get its grads + seq = pack_padded_sequence(inp, batch_sizes) + seq.data.requires_grad_(required_grad) + return seq + + +def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + is_rnn = kwargs['is_rnn'] + nonlinearity = ('relu', 'tanh') + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + + samples = [] + if is_rnn: + prod_gen = product(nonlinearity, bias, batch_first, bidirectional) + else: + prod_gen = product(bias, batch_first, bidirectional) + + for args in prod_gen: + if is_rnn: + nl, b, b_f, bidir = args + else: + b, b_f, bidir = args + + cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + if is_rnn: + cons_args['nonlinearity'] = nl + cons_args_hidden['nonlinearity'] = nl + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((3, 2))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + if with_packed_sequence: + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs): + # 
Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + proj_sizes = (0, 2) + + samples = [] + prod_gen = product(bias, batch_first, bidirectional, proj_sizes) + + for args in prod_gen: + b, b_f, bidir, proj_size = args + hidden_size = 3 + cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((2, 2))), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + h_out = proj_size if proj_size > 0 else hidden_size + hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size))) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), hx), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + + return samples + + + +def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, 
device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))), + ), + ] + +def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 2), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2), 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4), 5), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7), + forward_input=FunctionInput(make_input((1, 2, 1, 2, 1))), + ), + ] + +def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding1d_circular_ref(inp, pad): + r""" input: + [[[0., 1., 2.], + [3., 4., 5.]]] + pad: (1, 2) + output: + [[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.]]] + """ + return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + 
reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 1)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding2d_circular_ref(inp, pad): + r"""input: + [[[[0., 1., 2], + [3., 4., 5.]]]] + pad: (1, 2, 2, 1) + output: + [[[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.]]]] + """ + inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2) + return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 2, 1)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((2, 3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 3, 1)), + forward_input=FunctionInput(make_input((1, 1, 3, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ] + +def 
module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + def padding3d_circular_ref(inp, pad): + r"""input: + [[[[[ 0., 1., 2.], + [ 3., 4., 5.]], + [[ 6., 7., 8.], + [ 9., 10., 11.]]]]] + pad: (1, 2, 2, 1, 1, 2) + output: [[[[[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]]]]] + """ + inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2) + inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3) + return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 2, 2, 1, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + 
constructor_input=FunctionInput((3, 3, 2, 1, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ] + + +# All these operators share similar issues on cuDNN and MIOpen +rnn_gru_lstm_module_info_decorators = ( + # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward. + # We could not generate a fallback + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_grad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented. + # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_gradgrad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # CUDNN GRU doesn't accept non-contiguous hx + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float). + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestExpandedWeightModule", "test_module", + device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestDecomp", "test_rnn_decomp_module", + device_type='cuda' + ) +) + +# Start of module error inputs functions. 
def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for nn.RNNCell / nn.GRUCell.

    Covers forward-time validation: inconsistent input_size, inconsistent
    hidden_size (checked for the default, 'relu' and 'tanh' nonlinearities),
    mismatched input/hidden batch sizes, and an over-dimensional (4D) hidden
    state. All errors are raised at forward time (FORWARD_ERROR).
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = [
        # input feature size 11 does not match constructor input_size=10
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 11), make_input(3, 20)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="input has inconsistent input_size: got 11 expected 10"
        ),
        # hidden feature size 21 does not match constructor hidden_size=20
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20"
        ),
        # input batch size 3 vs hidden batch size 5
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), make_input(5, 20)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="Input batch size 3 doesn't match hidden0 batch size 5"
        ),
        # hidden state must be 1D or 2D; a 4D tensor is rejected
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), make_input(3, 1, 1, 20)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex="Expected hidden to be 1D or 2D, got 4D instead"
        ),
        # hidden_size mismatch is also validated with nonlinearity='relu'
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20, 'relu'),
                forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20"
        ),
        # ... and with nonlinearity='tanh'
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20, 'tanh'),
                forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20"
        ),
    ]
    return samples

def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for nn.LSTMCell.

    Mirrors the RNN/GRU cell cases above, but the hidden state is the
    (h, c) tuple: inconsistent input_size, inconsistent hidden_size,
    mismatched batch sizes, and a 4D hx[0]/hx[1].
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = [
        # input feature size 11 does not match constructor input_size=10
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 11), (make_input(3, 20), make_input(3, 20))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="input has inconsistent input_size: got 11 expected 10"
        ),
        # hidden feature size 21 does not match constructor hidden_size=20
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), (make_input(3, 21), make_input(3, 21))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20"
        ),
        # input batch size 3 vs hidden batch size 5
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), (make_input(5, 20), make_input(5, 20))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=RuntimeError,
            error_regex="Input batch size 3 doesn't match hidden0 batch size 5"
        ),
        # hx entries must be 1D or 2D; 4D tensors are rejected
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(make_input(3, 10), (make_input(3, 1, 1, 20), make_input(3, 1, 1, 20))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex="Expected hx\\[0\\] to be 1D or 2D, got 4D instead"
        ),
    ]
    return samples


def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for nn.RNN / nn.GRU.

    Both errors fire at construction time (CONSTRUCTION_ERROR):
    hidden_size and num_layers must each be strictly positive.
    """
    samples = [
        ErrorModuleInput(
            ModuleInput(constructor_input=FunctionInput(10, 0, 1)),
            error_on=ModuleErrorEnum.CONSTRUCTION_ERROR,
            error_type=ValueError,
            error_regex="hidden_size must be greater than zero"
        ),
        ErrorModuleInput(
            ModuleInput(constructor_input=FunctionInput(10, 10, 0)),
            error_on=ModuleErrorEnum.CONSTRUCTION_ERROR,
            error_type=ValueError,
            error_regex="num_layers must be greater than zero"
        ),
    ]
    return samples

def _module_error_inputs_torch_nn_PadNd(device, dtype, requires_grad, input_shape, error_regex, is_constant):
    """Shared builder for the PadNd error-input functions below.

    Each PadNd module rejects inputs whose dimensionality is outside its
    supported range; the three public wrappers differ only in the offending
    input shape and the expected error message.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ErrorModuleInput(
            ModuleInput(
                # ConstantPadNd takes (padding, value); the others take only padding.
                constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3),
                forward_input=FunctionInput(make_input(input_shape)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex=error_regex,
        ),
    ]

def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for 1D padding modules: a 4D input is out of the 2D/3D range."""
    return _module_error_inputs_torch_nn_PadNd(
        device, dtype, requires_grad,
        (2, 3, 4, 5),
        r"expected 2D or 3D input \(got 4D input\)",
        kwargs.get('is_constant', False),
    )

def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for 2D padding modules: a 2D input is out of the 3D/4D range."""
    return _module_error_inputs_torch_nn_PadNd(
        device, dtype, requires_grad,
        (2, 3),
        r"expected 3D or 4D input \(got 2D input\)",
        kwargs.get('is_constant', False),
    )

def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs for 3D padding modules: a 2D input is out of the 4D/5D range."""
    return _module_error_inputs_torch_nn_PadNd(
        device, dtype, requires_grad,
        (2, 3),
        r"expected 4D or 5D input \(got 2D input\)",
        kwargs.get('is_constant', False),
    )


# Database of ModuleInfo entries in alphabetical order.
+module_db: List[ModuleInfo] = [ + ModuleInfo(torch.nn.AdaptiveAvgPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS), + # Fails on backward check if output size is 1x1 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + ),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AvgPool1d, + module_inputs_func=module_inputs_torch_nn_AvgPool1d, + ), + ModuleInfo(torch.nn.AvgPool2d, + module_inputs_func=module_inputs_torch_nn_AvgPool2d, + skips=( + # The difference between channels last backward and + # channels first backward of AvgPool2d on CUDA is too large + # See https://github.com/pytorch/pytorch/issues/107201 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + 
active_if=operator.itemgetter('training'), + device_type='cuda', + ), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),), + ), + ModuleInfo(torch.nn.AvgPool3d, + module_inputs_func=module_inputs_torch_nn_AvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # No channels_last support for AvgPool1d as it does not take 4D inputs + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.BatchNorm1d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm1d, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + )) + ), + ModuleInfo(torch.nn.BatchNorm2d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm2d, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + ),) + ), + ModuleInfo(torch.nn.BatchNorm3d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm3d, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + ),) + ), + ModuleInfo(torch.nn.CELU, + module_inputs_func=module_inputs_torch_nn_CELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Conv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + 
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Conv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]),), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 
'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Fails on backward check because ViewAsRealBackward apply contiguous for grad + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64, torch.complex128]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True), + dtypes=floating_and_complex_types_and(torch.chalf), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 
'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # ConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + # These fail only on ROCm + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.CosineEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_CosineEmbeddingLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ELU, + module_inputs_func=module_inputs_torch_nn_ELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.FractionalMaxPool2d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.FractionalMaxPool3d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.L1Loss, + module_inputs_func=module_inputs_torch_nn_L1Loss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.SmoothL1Loss, + module_inputs_func=module_inputs_torch_nn_SmoothL1Loss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LazyConv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well 
with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy 
modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Linear, + module_inputs_func=module_inputs_torch_nn_Linear, + skips=( + # No channels_last support for Linear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Bilinear, + module_inputs_func=module_inputs_torch_nn_Bilinear, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-4, rtol=1e-4), + torch.float64: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_forward', device_type='cpu'), + ], + skips=( + # No channels_last support for Bilinear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LPPool1d, + module_inputs_func=module_inputs_torch_nn_LPPool1d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.LPPool2d, + module_inputs_func=module_inputs_torch_nn_LPPool2d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LPPool3d, + module_inputs_func=module_inputs_torch_nn_LPPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps),) + ), + ModuleInfo(torch.nn.MaxPool1d, + module_inputs_func=module_inputs_torch_nn_MaxPool1d, + ), + ModuleInfo(torch.nn.MaxPool2d, + module_inputs_func=module_inputs_torch_nn_MaxPool2d, + ), + ModuleInfo(torch.nn.MaxPool3d, + module_inputs_func=module_inputs_torch_nn_MaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.KLDivLoss, + module_inputs_func=module_inputs_torch_nn_KLDivLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # https://github.com/pytorch/pytorch/issues/115588 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MSELoss, + module_inputs_func=module_inputs_torch_nn_MSELoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MarginRankingLoss, + module_inputs_func=module_inputs_torch_nn_MarginRankingLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiLabelMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multilabel_margin_loss_forward' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::multilabel_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MultiMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multi_margin_loss' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # RuntimeError: derivative for aten::multi_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.SoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_SoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MultiLabelSoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelSoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.NLLLoss, + module_inputs_func=module_inputs_torch_nn_NLLLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GaussianNLLLoss, + module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.PoissonNLLLoss, + module_inputs_func=module_inputs_torch_nn_PoissonNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.HingeEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_HingeEmbeddingLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.HuberLoss, + module_inputs_func=module_inputs_torch_nn_HuberLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: seemingly incorrect output dtype + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCELoss, + module_inputs_func=module_inputs_torch_nn_BCELoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCEWithLogitsLoss, + module_inputs_func=module_inputs_torch_nn_BCEWithLogitsLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # see #119108: tolerance issue + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.CrossEntropyLoss, + module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss, + dtypes=get_all_fp_dtypes(include_half=True, include_bfloat16=False), + decorators=( + # No channels_last support for loss functions. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-2, rtol=1e-3)}), "TestModule", + "test_forward", dtypes=[torch.float16], device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16], + device_type='cuda'),), + ), + ModuleInfo(torch.nn.CTCLoss, + module_inputs_func=module_inputs_torch_nn_CTCLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # The operator aten::_ctc_loss is not currently implemented for the MPS device. 
+ DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # https://github.com/pytorch/pytorch/issues/115585 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),) + ), + ModuleInfo(torch.nn.GELU, + module_inputs_func=module_inputs_torch_nn_GELU, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GLU, + module_inputs_func=module_inputs_torch_nn_GLU, + ), + ModuleInfo(torch.nn.GroupNorm, + module_inputs_func=module_inputs_torch_nn_GroupNorm, + dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True), + skips=( + # Tracking at https://github.com/pytorch/pytorch/issues/98089 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_memory_format', device_type='cpu'), + # No channels_last support for GroupNorm currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'), + DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad", + active_if=TEST_WITH_ROCM, device_type='cuda'),) + ), + ModuleInfo(torch.nn.Hardshrink, + module_inputs_func=module_inputs_torch_nn_Hardshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),), + ), + ModuleInfo(torch.nn.Hardswish, + module_inputs_func=module_inputs_torch_nn_Hardswish, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),), + supports_gradgrad=False), + ModuleInfo(torch.nn.Hardtanh, + module_inputs_func=module_inputs_torch_nn_Hardtanh, + ), + ModuleInfo(torch.nn.InstanceNorm1d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm1d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm2d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm2d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm3d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3), + train_and_eval_differ=True, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # No channels_last support for InstanceNorm3d currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LocalResponseNorm, + module_inputs_func=module_inputs_torch_nn_LocalResponseNorm, + skips=( + # uses avg_pool3d which is not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.LayerNorm, + module_inputs_func=module_inputs_torch_nn_LayerNorm, + skips=( + # No channels_last support for LayerNorm currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.RMSNorm, + module_inputs_func=module_inputs_torch_nn_RMSNorm, + ), + # TransformerEncoder takes the same inputs as TransformerEncoderLayer + ModuleInfo(torch.nn.TransformerEncoder, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoder, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # Doesn't support device / dtype kwargs directly because it is just a + # container of TransformerEncoderLayers. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'),) + ), + ModuleInfo(torch.nn.TransformerEncoderLayer, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='cpu', active_if=IS_WINDOWS), + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.TransformerDecoderLayer, + module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerDecoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Transformer, + module_inputs_func=module_inputs_torch_nn_Transformer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for Transformer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiheadAttention, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_MultiheadAttention, + skips=( + # No channels_last support for MultiheadAttention currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Embedding, + module_inputs_func=module_inputs_torch_nn_Embedding, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='mps')], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ReLU, + module_inputs_func=module_inputs_torch_nn_ReLU, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LeakyReLU, + module_inputs_func=module_inputs_torch_nn_LeakyReLU, + ), + ModuleInfo(torch.nn.ReLU6, + module_inputs_func=module_inputs_torch_nn_ReLU6, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.PReLU, + module_inputs_func=module_inputs_torch_nn_PReLU, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNNCell, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.GRUCell, + module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.LSTMCell, + module_inputs_func=module_inputs_torch_nn_LSTMCell, + module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell, + ), + ModuleInfo(torch.nn.Sigmoid, + module_inputs_func=module_inputs_torch_nn_Sigmoid, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LogSigmoid, + module_inputs_func=module_inputs_torch_nn_LogSigmoid, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.SiLU, + module_inputs_func=module_inputs_torch_nn_SiLU, + ), + ModuleInfo(torch.nn.Softmax, + module_inputs_func=module_inputs_torch_nn_Softmax, + ), + ModuleInfo(torch.nn.Softmax2d, + module_inputs_func=module_inputs_torch_nn_Softmax2d, + skips=( + # no channels last support for Softmax2d currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LogSoftmax, + module_inputs_func=module_inputs_torch_nn_LogSoftmax, + skips=( + # no channels last support for LogSoftmax currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See 
#119108: inf nan error + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Softmin, + module_inputs_func=module_inputs_torch_nn_Softmin, + skips=( + # no channels last support for Softmin currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Softplus, + module_inputs_func=module_inputs_torch_nn_Softplus, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softshrink, + module_inputs_func=module_inputs_torch_nn_Softshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softsign, + module_inputs_func=module_inputs_torch_nn_Softsign, + ), + ModuleInfo(torch.nn.Tanh, + module_inputs_func=module_inputs_torch_nn_Tanh, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Tanhshrink, + module_inputs_func=module_inputs_torch_nn_Tanhshrink, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Threshold, + module_inputs_func=module_inputs_torch_nn_Threshold, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Mish, + module_inputs_func=module_inputs_torch_nn_Mish, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNN, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators + ), + ModuleInfo(torch.nn.GRU, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.LSTM, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_LSTM, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + # LSTM with projections is not currently supported with MPS + DecorateInfo(skipMPS),), + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.ReflectionPad1d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad1d, + ), + ModuleInfo(torch.nn.ReflectionPad2d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReflectionPad3d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReplicationPad1d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad1d, + ), 
+ ModuleInfo(torch.nn.ReplicationPad2d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReplicationPad3d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.SELU, + module_inputs_func=module_inputs_torch_nn_SELU, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.ZeroPad1d, + module_inputs_func=module_inputs_torch_nn_ZeroPad1d, + ), + ModuleInfo(torch.nn.ZeroPad2d, + module_inputs_func=module_inputs_torch_nn_ZeroPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ZeroPad3d, + module_inputs_func=module_inputs_torch_nn_ZeroPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.CircularPad1d, + module_inputs_func=module_inputs_torch_nn_CircularPad1d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad1d, + ), + ModuleInfo(torch.nn.CircularPad2d, + module_inputs_func=module_inputs_torch_nn_CircularPad2d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad2d, + ), + ModuleInfo(torch.nn.CircularPad3d, + module_inputs_func=module_inputs_torch_nn_CircularPad3d, + 
module_error_inputs_func=module_error_inputs_torch_nn_Pad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),) + ), + ModuleInfo(torch.nn.ConstantPad1d, + module_inputs_func=module_inputs_torch_nn_ConstantPad1d, + ), + ModuleInfo(torch.nn.ConstantPad2d, + module_inputs_func=module_inputs_torch_nn_ConstantPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ConstantPad3d, + module_inputs_func=module_inputs_torch_nn_ConstantPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ) +] diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..ac4a7f920cc2e78e70827b211456e62ecc425ade --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py @@ -0,0 +1,1920 @@ +# mypy: ignore-errors + +import functools +import itertools +import sys +import unittest +from copy import deepcopy +from enum import Enum +from typing import Any, Dict, List, Tuple, Union + +import torch +from torch import Tensor +from torch.nn import Parameter +from torch.optim import ( + Adadelta, + Adagrad, + Adam, + Adamax, + AdamW, + ASGD, + LBFGS, + NAdam, + Optimizer, + RAdam, + RMSprop, + Rprop, + SGD, + SparseAdam, +) +from torch.optim.lr_scheduler import ( + ConstantLR, + ExponentialLR, + LinearLR, + PolynomialLR, + ReduceLROnPlateau, + StepLR, +) +from torch.testing._internal.common_device_type import tol, toleranceOverride +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_utils 
import ( + _TestParametrizer, + skipIfMps, + skipIfTorchDynamo, + TEST_WITH_TORCHDYNAMO, +) +from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices + + +class OptimizerInput: + """Contains args / kwargs to be passed to an optimizer constructor.""" + + __slots__ = ["params", "kwargs", "desc"] + + def __init__( + self, + params: Union[List[Parameter], List[Tensor], Dict[Any, Any]], + kwargs: Dict[str, Any], + desc: str = "", + ): + # params can be a list of Tensors OR param_groups OR None + self.params = params + self.kwargs = kwargs + self.desc = desc + + def __repr__(self): + return f"params={self.params}, kwargs={self.kwargs}, desc={self.desc}" + + +class OptimizerErrorEnum(Enum): + """Enumerates when an error is raised when testing optimizers.""" + + CONSTRUCTION_ERROR = 0 + STEP_ERROR = 1 + + +class ErrorOptimizerInput: + """ + An OptimizerInput that will cause the optimizer to throw an error when constructed. + Includes the type and string of the resulting error. + """ + + __slots__ = ["optimizer_error_input", "error_on", "error_type", "error_regex"] + + def __init__( + self, + optimizer_error_input, + *, + error_on=OptimizerErrorEnum.CONSTRUCTION_ERROR, + error_type=RuntimeError, + error_regex="", + ): + self.optimizer_error_input = optimizer_error_input + self.error_on = error_on + self.error_type = error_type + self.error_regex = error_regex + + +class OptimizerInfo: + """Optimizer information to be used in testing.""" + + def __init__( + self, + optim_cls: Optimizer, # Class object for the Optimizer under test + *, + # Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility + # to the test using the OptimizerInfo. OptimizerInput.params is likely None. 
+ # Can optionally take in device to filter out certain unsupported configs + optim_inputs_func, + # Tuple of lambdas to generate LRScheduler instances to run with the optimizer for the + # LRScheduler tests like test_forloop_goes_right_direction with_lrsched. + # We DO NOT expect to thoroughly test LRSchedulers through the optimizers, so not every + # LRScheduler configuration will be included. See test_lrscheduler.py for that instead. + # A few optimizers like SGD and Adam will test more LRSchedulers. + scheduler_inputs=( + [ + lambda opt: StepLR(opt, gamma=0.9, step_size=10), + lambda opt: ReduceLROnPlateau(opt), + ], + ), + # A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer + # supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means. + supported_impls: Tuple[str] = ("foreach", "differentiable"), + # the optim supports passing in sparse gradients as well as dense grads + supports_sparse: bool = False, + # the optim only supports one config: sparse grads w/ dense params, see SparseAdam + only_supports_sparse_grads: bool = False, + # Tuple of (optimizer kwargs, schedulers_constructors) specifically for sparse tests, + # with especially tuned hyperparameters. These only apply if the optimizer supports + # sparse parameters or grads. 
+ metadata_for_sparse=({}, []), + # the optim supports complex parameters + supports_complex: bool = True, + # whether the optimizer.step() function requires a closure to be passed + step_requires_closure: bool = False, + # whether the optimizer supports per-param options with parameter groups + supports_param_groups: bool = True, + # whether the optimizer supports parameters on multiple devices + supports_multiple_devices: bool = True, + skips=(), # Indicates which tests to skip + decorators=None, # Additional decorators to apply to generated tests + optim_error_inputs_func=None, # Function to generate optim inputs that error + supports_fused_on: Tuple[str] = (), + ): + self.optim_cls = optim_cls + self.optim_inputs_func = optim_inputs_func + self.scheduler_inputs = scheduler_inputs + self.supported_impls = supported_impls + self.supports_sparse = supports_sparse + self.metadata_for_sparse = metadata_for_sparse + self.only_supports_sparse_grads = only_supports_sparse_grads + self.supports_complex = supports_complex + self.step_requires_closure = step_requires_closure + self.supports_param_groups = supports_param_groups + self.supports_multiple_devices = supports_multiple_devices + self.decorators = ( + *(decorators if decorators else []), + *(skips if skips else []), + ) + self.optim_error_inputs_func = optim_error_inputs_func + self.supports_fused_on = supports_fused_on + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + @property + def name(self): + return self.optim_cls.__name__ + + +class optims(_TestParametrizer): + """Decorator for specifying a list of optimizers over which to run a test.""" + + def __init__(self, optim_info_iterable, dtypes=None): + 
self.optim_info_list = list(optim_info_iterable) + + # optimizers aren't limited to be one dtype as parameters can have different dtypes + # We default to torch.float32, but dtypes should be specified through passed in + # parameters. + self.dtypes = dtypes if dtypes is not None else [torch.float32] + + def _parametrize_test(self, test, generic_cls, device_cls): + if device_cls is None: + raise RuntimeError( + "The @optims decorator is only intended to be used in a device-specific " + "context; use it with instantiate_device_type_tests() instead of " + "instantiate_parametrized_tests()" + ) + + for optim_info, dtype in itertools.product(self.optim_info_list, self.dtypes): + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = optim_info.name + + # Construct parameter kwargs to pass to the test. + param_kwargs = {"optim_info": optim_info, "dtype": dtype} + + try: + + @functools.wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorator_fn = functools.partial( + optim_info.get_decorators, + generic_cls.__name__, + test.__name__, + device_cls.device_type, + dtype, + ) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print( + f"Failed to instantiate {test_name} for module {optim_info.name}!" + ) + raise ex + + +# Helper function for generating error inputs for all optimizers, used below. 
+def get_error_inputs_for_all_optims(device, dtype): + if str(device) == "cpu": + sample_param = Parameter(torch.randn(1, device=device, dtype=dtype)) + return [ + ErrorOptimizerInput( + OptimizerInput( + params=sample_param, + kwargs={}, + desc="invalid param type", + ), + error_type=TypeError, + error_regex="params argument given to the optimizer should be an iterable of Tensors or dicts", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[sample_param, sample_param], + kwargs={}, + desc="a param group cannot have duplicate parameters", + ), + error_type=UserWarning, + error_regex=".*a parameter group with duplicate parameters.*", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[{"params": sample_param}, {"params": sample_param}], + kwargs={}, + desc="duplicate parameters should not occur across param groups either", + ), + error_type=ValueError, + error_regex="some parameters appear in more than one parameter group", + ), + ] + else: + return [] + + +# ------------------------------------------------------------------------------------------ +# NOTE: [optimizer kwarg categories] +# We categorize optimizer kwargs as 3 types: +# 1. optimizer-specific flags are like amsgrad or rho or beta, flags that are specific to +# algorithms and thus only show up for certain optimizers. There are many of these, so I +# do not bother gathering them all and listing them here. The converse to these would be +# global flags that every optimizer ideally _should_ support. We break global flags into +# 2 further categories and list them all below. +# 2. global-friendly = ["lr", "weight_decay", "maximize", "capturable"] +# global-friendly flags are global flags who play nicely with all other global flags, +# i.e., are mutually exclusive in function. This means that any pair of the following +# flags can be toggled at once (e.g., maximize and weight_decay). 
Furthermore, any of the +# following flags theoretically can be enabled with ANY other global flag, including the +# cliquey ones (e.g, capturable and foreach). +# 3. global-cliquey = ["foreach", "fused", "differentiable"] +# global-cliquey flags are global flags that do NOT coexist with other cliquey flags, +# usually because they contradict each other in function. For example, one should not flip +# both foreach AND fused to True, because they are two differing performance optimizations +# in which you can only opt into one. +# +# The following optim_inputs_func_* sampling functions only return constructor combinations of +# optimizer-specific and global-friendly flags. This is because we are confident they would mesh +# well with additional kwargs. On the flip side of the same coin, we reserve setting the +# global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs. + + +def optim_inputs_func_adadelta(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "capturable": True}, + desc="capturable with weight decay", + ), + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001), "capturable": True}, + desc="Tensor lr with capturable", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput( + params=None, kwargs={"rho": 0.95, "weight_decay": 0.9}, desc="rho" + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_adadelta(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == 
"cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, rho=1.1), + desc="rho should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid rho value: 1.1", + ), + ] + return error_inputs + + +def optim_inputs_func_adagrad(device, dtype=None): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), + OptimizerInput( + params=None, + kwargs={"initial_accumulator_value": 0.1, "weight_decay": 0.1}, + desc="initial_accumulator_value", + ), + OptimizerInput( + params=None, + kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1}, + desc="lr_decay", + ), # TODO: Move out to testing in param_group? + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001)}, + desc="Tensor lr", + ), + ] + + +def optim_error_inputs_func_adagrad(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, lr_decay=-0.5), + desc="lr_decay must be bigger than 0", + ), + error_type=ValueError, + error_regex="Invalid lr_decay value: -0.5", + ), + ] + return error_inputs + + +# TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work +# with all implementation code paths... 
+def optim_inputs_func_adam(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "amsgrad": True, "capturable": True}, + desc="capturable, amsgrad", + ), + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001), "amsgrad": True, "capturable": True}, + desc="Tensor lr with capturable and amsgrad", + ), + ] + + total = [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1, "amsgrad": True}, desc="amsgrad" + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + if dtype in (torch.float16,): + for input in total: + """ + Too small eps will make denom to be zero for low precision dtype + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + For example, + >>> a + tensor([0.], dtype=torch.float16) + >>> a + 1e-8 + tensor([0.], dtype=torch.float16) + """ + input.kwargs["eps"] = 0.1 + return total + + +def optim_error_inputs_func_adam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-1), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -1", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + 
kwargs=dict(lr=torch.tensor(0.001), foreach=True), + desc="lr as Tensor doesn't work with foreach & not capturable", + ), + error_type=ValueError, + error_regex="lr as a Tensor is not supported for capturable=False and foreach=True", + ), + ] + if "cuda" in str(device): + sample_tensor = torch.empty((), device=device, dtype=dtype) + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=[sample_tensor], + kwargs={"foreach": True, "fused": True}, + desc="`fused` and `foreach` cannot be `True` together", + ), + error_type=RuntimeError, + error_regex="`fused` and `foreach` cannot be `True` together", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[sample_tensor], + kwargs={"fused": True, "differentiable": True}, + desc="`fused` does not support `differentiable`", + ), + error_type=RuntimeError, + error_regex="`fused` does not support `differentiable`", + ), + ] + return error_inputs + + +def optim_inputs_func_adamax(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "maximize": True, "capturable": True}, + desc="capturable, maximize, weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0, "maximize": True, "capturable": True}, + desc="capturable, maximize", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True}, + desc="capturable, weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "lr": torch.tensor(0.001), + "weight_decay": 0.9, + "maximize": False, + "capturable": True, + }, + desc="capturable, weight_decay, tensor LR", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + 
kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_adamax(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(0.0, 1.0)), + desc="beta2 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 1: 1.0", + ), + ] + return error_inputs + + +def optim_inputs_func_adamw(device, dtype=None): + return optim_inputs_func_adam(device, dtype) + + +def optim_error_inputs_func_adamw(device, dtype): + return optim_error_inputs_func_adam(device, dtype) + + +def optim_inputs_func_asgd(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"maximize": True, "capturable": True}, + desc="maximize, capturable", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "capturable": True}, + desc="weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True}, + desc="maximize, weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={ + "lr": torch.tensor(0.001), + "weight_decay": 0.1, + "maximize": True, + "capturable": True, + }, + desc="maximize, weight_decay, capturable, tensor LR", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lambd": 0.1}, desc="non-default lambd"), + OptimizerInput(params=None, kwargs={"lr": 0.02}, desc="non-default lr"), + OptimizerInput(params=None, kwargs={"t0": 100}, desc="t0"), + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + 
OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize, nonzero weight_decay", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_asgd(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-0.5), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_lbfgs(device, dtype=None): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"tolerance_grad": 1e-6}, desc="tolerance_grad" + ), + OptimizerInput( + params=None, + kwargs={"line_search_fn": "strong_wolfe"}, + desc="strong_wolfe", + ), + ] + + +def optim_error_inputs_func_lbfgs(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + return error_inputs + + +def optim_inputs_func_nadam(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "momentum_decay": 6e-3, "capturable": True}, + desc="weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.9, + "momentum_decay": 6e-3, + "decoupled_weight_decay": True, + "capturable": True, + }, + desc="decoupled_weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={ + "lr": torch.tensor(0.001), + "weight_decay": 0.9, + "momentum_decay": 6e-3, + "decoupled_weight_decay": True, + "capturable": True, + }, + desc="decoupled_weight_decay, capturable", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, 
kwargs={"lr": 1e-3}, desc="non-default lr"), + OptimizerInput( + params=None, + kwargs={"momentum_decay": 6e-3}, + desc="non-zero momentum_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "momentum_decay": 6e-3}, + desc="weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.1, + "momentum_decay": 6e-3, + "decoupled_weight_decay": True, + }, + desc="decoupled_weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_nadam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum_decay=-0.2), + desc="momentum_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid momentum_decay value: -0.2", + ), + ] + return error_inputs + + +# Weird story bro, NAdam and RAdam do not have maximize. 
+def optim_inputs_func_radam(device=None, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={ + "capturable": True, + "weight_decay": 0.1, + }, + desc="capturable, weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "capturable": True, + "weight_decay": 0.1, + "decoupled_weight_decay": True, + }, + desc="capturable, weight_decay, decoupled_weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "lr": torch.tensor(0.001), + "capturable": True, + "weight_decay": 0.1, + "decoupled_weight_decay": True, + }, + desc="capturable, weight_decay, decoupled_weight_decay, tensor LR", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 2e-3}, desc="non-default lr"), + OptimizerInput(params=None, kwargs={"eps": 1e-6}, desc="non-default eps"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "decoupled_weight_decay": True}, + desc="decoupled_weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_radam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-1), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -1", + ), + ] + return error_inputs + + +def 
optim_inputs_func_rmsprop(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True}, + desc="capturable, maximize", + ), + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001), "capturable": True}, + desc="Tensor lr with capturable", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "centered": True}, + desc="centered", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "centered": True, "momentum": 0.1}, + desc="momentum", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.1, + "centered": True, + "momentum": 0.1, + "maximize": True, + }, + desc="maximize", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_rmsprop(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum=-1.0), + desc="momentum should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid momentum value: -1.0", + ), + ] + return error_inputs + + +def optim_inputs_func_rprop(device, dtype=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001), "capturable": True}, + desc="Tensor lr with capturable", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 2e-4}, desc="non-default lr"), + OptimizerInput( 
+ params=None, kwargs={"etas": (0.5, 1.5)}, desc="non-default etas" + ), + OptimizerInput( + params=None, + kwargs={"step_sizes": (2e-6, 100)}, + desc="non-default step_sizes", + ), + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_rprop(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, etas=(1.0, 0.5)), + desc="0 < eta1 < 1 < eta2", + ), + error_type=ValueError, + error_regex="Invalid eta values: 1.0, 0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_sgd(device, dtype=None): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"lr": torch.tensor(0.001)}, desc="tensor lr" + ), + OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "dampening": 0.5}, + desc="dampening", + ), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "weight_decay": 0.1}, + desc="non-zero weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "nesterov": True, "weight_decay": 0.1}, + desc="nesterov", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + + +def optim_error_inputs_func_sgd(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum=-0.5), + desc="momentum should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid momentum value: -0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_sparseadam(device, 
dtype=None): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput( + params=None, kwargs={"lr": 0.01}, desc="non-default lr" + ), # TODO: Move out to testing in param_group? + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + ] + + +def optim_error_inputs_func_sparseadam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[ + torch.zeros( + 3, layout=torch.sparse_coo, device=device, dtype=dtype + ) + ], + kwargs={}, + desc="dense params required", + ), + error_type=ValueError, + error_regex="SparseAdam requires dense parameter tensors", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[ + { + "params": [ + torch.zeros( + 3, + layout=torch.sparse_coo, + device=device, + dtype=dtype, + ) + ] + } + ], + kwargs={}, + desc="dense params required in param_groups", + ), + error_type=ValueError, + error_regex="SparseAdam requires dense parameter tensors", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[torch.rand(2, 3, device=device, dtype=torch.complex64)], + kwargs=dict(), + desc="complex not supported", + ), + error_type=ValueError, + error_regex="SparseAdam does not support complex parameters", + ), + ] + return error_inputs + + +def _get_device_type(device: Union[str, torch.device]) -> str: + # Returns the device type as a string, e.g., "cpu" or "cuda" + if isinstance(device, torch.device): + device = str(device.type) + assert isinstance(device, str) + return device.split(":")[0] + + +def _get_optim_inputs_including_global_cliquey_kwargs( + device, dtype, optim_info, skip=() +) -> List[OptimizerInput]: + """ + Return a list of all configs for a 
given optimizer as a list of OptimizerInputs, + including configs that have supported global cliquey kwargs (foreach, fused, + differentiable) based on optim_info.supported_impls. + + The configs (optim_inputs) returned by optim_info.optim_inputs_func(...) + intentionally do NOT include global cliquey kwargs to give flexibility to tests. + For example, testing correctness between toggling foreach on and off is now + trivial. That said, we sometimes want to test for all possible configs on an + optimizer including all supported flags, so this helper returns all optim inputs. + """ + assert all( + x in ["foreach", "fused", "differentiable"] for x in skip + ), "skip must be a subset of ['foreach', 'fused', 'differentiable']" + + optim_inputs = optim_info.optim_inputs_func(device) + + supported_impls = tuple( + x + for x in optim_info.supported_impls + if x not in skip + and (_get_device_type(device) in optim_info.supports_fused_on or x != "fused") + and ( + _get_device_type(device) in _get_foreach_kernels_supported_devices() + or x != "foreach" + ) + ) + + all_optim_inputs = [] + for optim_input in optim_inputs: + # Add the base config where all the flags are False + base_kwargs = deepcopy(optim_input.kwargs) + if len(supported_impls) != 0: + for flag in supported_impls: + base_kwargs[flag] = False + all_optim_inputs.append( + OptimizerInput(params=None, kwargs=base_kwargs, desc=optim_input.desc) + ) + else: + all_optim_inputs.append(optim_input) + # Add a config for when each of the global cliquey kwargs is True + # Note that in [optimizer kwarg categories], these kwargs are mutually + # exclusive, so we do not need to product them together. + for flag in supported_impls: + new_kwargs = deepcopy(base_kwargs) + new_kwargs[flag] = True + all_optim_inputs.append( + OptimizerInput( + params=None, kwargs=new_kwargs, desc=f"{optim_input.desc} & {flag}" + ) + ) + return all_optim_inputs + + +# Database of OptimizerInfo entries in alphabetical order. 
+optim_db: List[OptimizerInfo] = [ + OptimizerInfo( + Adadelta, + optim_inputs_func=optim_inputs_func_adadelta, + optim_error_inputs_func=optim_error_inputs_func_adadelta, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo("See #116028"), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + # Note on tolerances: + # test_correctness_Adadelta_cuda_float32 + # Mismatched elements: 10 / 100 (10.0%) + # Greatest absolute difference: 4.838220775127411e-05 at index (7, 4) (up to 1e-05 allowed) + # Greatest relative difference: 0.007270356640219688 at index (7, 2) (up to 1e-05 allowed) + # This is due to floating point ordering error + usage of sqrt + DecorateInfo( + toleranceOverride( + { + torch.float32: tol( + rtol=5.5e-4, + atol=5e-5, + ) + } + ), + "CompiledOptimizerParityTests", + "test_correctness", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + Adagrad, + optim_inputs_func=optim_inputs_func_adagrad, + optim_error_inputs_func=optim_error_inputs_func_adagrad, + supported_impls=("foreach", "differentiable", "fused"), + supports_fused_on=("cpu",), + supports_sparse=True, + metadata_for_sparse=( + {"lr": 0.1, "weight_decay": 0, "lr_decay": 0}, + [ + lambda opt: StepLR(opt, gamma=1 - 1e-5, step_size=500), + lambda opt: ReduceLROnPlateau(opt, threshold=1e-4), + ], + ), + decorators=( + DecorateInfo( + # Note on tolerances: + # difference comes from the fact that the non fused kernel have + 
# more dtype cast operations. We have another test test_fused_cpu_matches_cuda + # to make sure there is no discrepancies between cuda fused kernel + # and cpu fused kernel + toleranceOverride( + { + torch.bfloat16: tol(atol=5e-3, rtol=5e-3), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), + ), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo("See #116028"), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + Adam, + optim_inputs_func=optim_inputs_func_adam, + scheduler_inputs=( + [lambda opt: ExponentialLR(opt, gamma=0.9)], + [lambda opt: LinearLR(opt, start_factor=0.4, total_iters=4)], + [ + lambda opt: ConstantLR(opt, factor=0.4, total_iters=4), + lambda opt: ExponentialLR(opt, gamma=0.9), + ], + [ + lambda opt: ExponentialLR(opt, gamma=0.9), + lambda opt: ReduceLROnPlateau(opt), + ], + [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)], + [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)], + [ + lambda opt: StepLR(opt, gamma=0.9, step_size=10), + lambda opt: ReduceLROnPlateau(opt), + ], + ), + optim_error_inputs_func=optim_error_inputs_func_adam, + supported_impls=("foreach", "differentiable", "fused"), + 
supports_fused_on=("cpu", "cuda"), + decorators=( + # Expected floating point error between fused and compiled forloop + DecorateInfo( + toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}), + "TestOptimRenewed", + "test_fused_matches_forloop", + active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO + and kwargs["dtype"] == torch.float64, + ), + DecorateInfo( + # Note on tolerances: + # difference comes from the fact that the non fused kernel have + # more dtype cast operations. We have another test test_fused_cpu_matches_cuda + # to make sure there is no discrepancies between cuda fused kernel + # and cpu fused kernel + toleranceOverride( + { + torch.bfloat16: tol(atol=5e-3, rtol=5e-3), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), + DecorateInfo( + # Note on tolerances: + # Tracking through #127000 + toleranceOverride( + { + torch.float32: tol(atol=3e-5, rtol=1.3e-06), + } + ), + "TestCudaOptims", + "test_grad_scaling_autocast_fused_optimizers", + ), + ), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + 
), + ), + ), + OptimizerInfo( + Adamax, + optim_inputs_func=optim_inputs_func_adamax, + optim_error_inputs_func=optim_error_inputs_func_adamax, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo("Mismatched _foreach_addcdiv_ types, see #118159"), + "TestOptimRenewed", + "test_complex", + ), + DecorateInfo( + skipIfTorchDynamo("See #116028"), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + unittest.skip("Uses too much memory, even for H100, surprisingly."), + "TestOptimRenewed", + "test_foreach_large_tensor", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + AdamW, + optim_inputs_func=optim_inputs_func_adamw, + optim_error_inputs_func=optim_error_inputs_func_adamw, + supported_impls=("foreach", "differentiable", "fused"), + supports_fused_on=("cpu", "cuda"), + decorators=( + # Expected error between compiled forloop and fused optimizers + DecorateInfo( + toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}), + "TestOptimRenewed", + "test_fused_matches_forloop", + active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO + and kwargs["dtype"] == torch.float64, + ), + DecorateInfo( + toleranceOverride( + # Note on tolerances: + # difference comes from the fact that the 
non fused kernel have + # more dtype cast operations. We have another test test_fused_cpu_matches_cuda + # to make sure there is no discrepancies between cuda fused kernel + # and cpu fused kernel + { + torch.bfloat16: tol(atol=5e-3, rtol=5e-3), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), + # Note on tolerances: + # Tracking through #127000 + DecorateInfo( + toleranceOverride( + { + torch.float32: tol( + atol=3e-5, + rtol=1.3e-06, + ) + } + ), + "TestCudaOptims", + "test_grad_scaling_autocast_fused_optimizers", + ), + ), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + ASGD, + optim_inputs_func=optim_inputs_func_asgd, + optim_error_inputs_func=optim_error_inputs_func_asgd, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ 
Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1.5e-5, rtol=1e-5), + } + ), + "TestOptimRenewed", + "test_step_is_noop_for_zero_grads", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + DecorateInfo( + unittest.skip( + "ASGD internally changes the weights even with zero grad" + ), + "TestOptimRenewed", + "test_step_is_noop_for_zero_grads", + ), + ), + ), + OptimizerInfo( + LBFGS, + optim_inputs_func=optim_inputs_func_lbfgs, + optim_error_inputs_func=optim_error_inputs_func_lbfgs, + supported_impls=(), + step_requires_closure=True, + supports_param_groups=False, + supports_multiple_devices=False, + skips=( + # Fails on MacOS 13.2.1 in CI https://github.com/pytorch/pytorch/issues/117094 + DecorateInfo( + skipIfMps, "TestOptimRenewed", "test_can_load_older_state_dict" + ), + DecorateInfo( + toleranceOverride( + { + torch.complex64: tol( + rtol=4.5e-5, + atol=5e-5, + ) + } + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + unittest.skip("Does not support param groups"), + "TestOptimRenewed", + "test_param_groups_lr", + ), + DecorateInfo( + unittest.skip("Does not support param groups"), + "TestOptimRenewed", + "test_param_groups_weight_decay", + ), + DecorateInfo( + unittest.skip("LBFGS doesn't support multidevice"), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + unittest.skip("Does not support param groups"), + "TestOptimRenewed", + "test_param_group_with_lrscheduler_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix 
point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + ), + ), + OptimizerInfo( + NAdam, + optim_inputs_func=optim_inputs_func_nadam, + optim_error_inputs_func=optim_error_inputs_func_nadam, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors, https://github.com/pytorch/pytorch/issues/117150" + ), + "TestOptimRenewed", + "test_load_nontensor_step", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + RAdam, + optim_inputs_func=optim_inputs_func_radam, + optim_error_inputs_func=optim_error_inputs_func_radam, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + 
"TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { + # previously atol=1e-7, rtol=1e-7 + torch.float64: tol(atol=1.5e-7, rtol=1.1e-7) + } + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + RMSprop, + optim_inputs_func=optim_inputs_func_rmsprop, + optim_error_inputs_func=optim_error_inputs_func_rmsprop, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo("See #116028"), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 + torch.float32: tol(atol=5e-04, rtol=0.01), + } + ), + "TestOptimRenewed", + "test_mixed_device_dtype", + active_if=TEST_WITH_TORCHDYNAMO, + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + Rprop, + 
optim_inputs_func=optim_inputs_func_rprop, + optim_error_inputs_func=optim_error_inputs_func_rprop, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # Rprop doesn't update for non-contiguous, see #118117 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo("See #116028"), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + SGD, + optim_inputs_func=optim_inputs_func_sgd, + scheduler_inputs=( + [lambda opt: StepLR(opt, gamma=0.9, step_size=10)], + [ + lambda opt: LinearLR( + opt, start_factor=0.4, end_factor=0.8, total_iters=4 + ) + ], + [ + lambda opt: StepLR(opt, gamma=0.9, step_size=10), + lambda opt: LinearLR( + opt, start_factor=0.4, end_factor=0.6, total_iters=4 + ), + ], + [ + lambda opt: StepLR(opt, gamma=0.99, step_size=10), + lambda opt: ExponentialLR(opt, gamma=0.99), + lambda opt: ReduceLROnPlateau(opt), + ], + [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)], + [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)], + [ + lambda opt: StepLR(opt, gamma=0.9, step_size=10), + lambda opt: ReduceLROnPlateau(opt), + ], + ), + optim_error_inputs_func=optim_error_inputs_func_sgd, + supported_impls=("foreach", "differentiable", "fused"), + supports_sparse=True, + metadata_for_sparse=( + { + "lr": 4.8e-3, + "maximize": False, + 
"momentum": 0, + "nesterov": False, + "weight_decay": 0, + }, + [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)], + ), + supports_fused_on=( + "cpu", + "cuda", + ), + skips=( + DecorateInfo( + skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), + "TestOptimRenewed", + "test_tensor_lr", + active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7), + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 + torch.float32: tol(atol=5e-04, rtol=0.007), + } + ), + "TestOptimRenewed", + "test_mixed_device_dtype", + active_if=TEST_WITH_TORCHDYNAMO, + ), + DecorateInfo( + skipIfTorchDynamo( + "This test uses mocks, which dynamo does not support" + ), + "TestOptimRenewed", + "test_defaults_changed_to_foreach", + ), + ), + ), + OptimizerInfo( + SparseAdam, + optim_inputs_func=optim_inputs_func_sparseadam, + optim_error_inputs_func=optim_error_inputs_func_sparseadam, + supported_impls=(), + only_supports_sparse_grads=True, + metadata_for_sparse=({"lr": 4e-2}, []), + supports_complex=False, # Missing complex support, see #118153 + skips=( + DecorateInfo( + skipIfMps, # SparseAdam does not support MPS + "TestOptimRenewed", + ), + DecorateInfo( + unittest.skip( + "SparseAdam does not support dense gradients, see #116507" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_param_groups_lr", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), 
class TensorTracker:
    """Queue of detached tensor snapshots used to align two multi-step computations.

    Snapshots are appended with ``add`` and later consumed in FIFO order with
    ``pop_check_set``, which both asserts equality against a reference tensor and
    resynchronizes that tensor in place, so numerical drift cannot compound
    across the steps of two supposedly-equal computations (e.g. two flavors of
    the same optimizer step). When a test fails under this discipline, it is
    likely a real problem rather than accumulated float noise.
    """

    def __init__(self, assert_eq_kwargs=None):
        # Keyword arguments forwarded to testcase.assertEqual in pop_check_set
        # (e.g. atol/rtol overrides).
        self.assert_eq_kwargs = {} if assert_eq_kwargs is None else assert_eq_kwargs
        self.tensors = []

    def add(self, tensor):
        """Record a clone().detach()'d snapshot of ``tensor``."""
        snapshot = tensor.clone().detach()
        self.tensors.append(snapshot)

    # FIFO: snapshots are consumed from the front, in the order they were added.
    def pop_check_set(self, tensor_to_set, testcase):
        """Pop the oldest snapshot, assert it equals ``tensor_to_set``, then
        overwrite ``tensor_to_set`` in place (via copy_) with the snapshot."""
        testcase.assertGreater(len(self.tensors), 0, "no tensors to pop")
        ref = self.tensors.pop(0)

        testcase.assertTrue(isinstance(ref, Tensor), f"{type(ref)=}")
        testcase.assertEqual(tensor_to_set, ref, **self.assert_eq_kwargs)

        with torch.no_grad():
            tensor_to_set.copy_(ref)

    def all_popped(self):
        """True once every recorded snapshot has been consumed."""
        return not self.tensors
+""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +from torch.ao.nn.intrinsic import _FusedModule +import torch.distributed as dist +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM + +from torch._export import capture_pre_autograd_graph +from torch.ao.quantization import ( + QuantType, + default_dynamic_qat_qconfig, + default_embedding_qat_qconfig, + default_symmetric_qnnpack_qat_qconfig, +) +from torch.ao.quantization.quantize_pt2e import ( + _convert_to_reference_decomposed_fx, + convert_pt2e, + prepare_pt2e, + prepare_qat_pt2e, +) +from torch.ao.quantization.backend_config import ( + get_executorch_backend_config, +) +from torch.ao.quantization.quantizer.xnnpack_quantizer import ( + XNNPACKQuantizer, + get_symmetric_quantization_config, +) +from torch.ao.quantization import QuantWrapper, QuantStub, DeQuantStub, \ + default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \ + propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \ + get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize, \ + QConfigMapping, get_default_qconfig_mapping, get_default_qat_qconfig_mapping +from torch.ao.quantization.quantization_mappings import ( + get_default_dynamic_quant_module_mappings, + get_default_qconfig_propagation_list, + get_default_qat_module_mappings, +) +from torch.testing._internal.common_quantized import ( + override_quantized_engine, +) +from torch.jit.mobile import _load_for_lite_interpreter + +try: + # graph mode quantization based on fx + from torch.ao.quantization.quantize_fx import ( + prepare_fx, + prepare_qat_fx, + convert_fx, + convert_to_reference_fx, + ) + from torch.ao.ns.fx.ns_types import 
NSSingleResultValuesType, NSSubgraph + from torch.fx.graph import Node + from torch.fx import GraphModule + HAS_FX = True +except ImportError: + HAS_FX = False + +import copy +import io +import functools +import time +import os + +import unittest +import numpy as np +from torch.testing import FileCheck +from typing import Callable, Tuple, Dict, Any, Union, Type, Optional +import torch._dynamo as torchdynamo + +class NodeSpec: + ''' Used for checking GraphModule Node + ''' + def __init__(self, op, target): + ''' + op: call_function | call_module + target: + for call_function, target would be a function + for call_module, target would be the type of PyTorch module + ''' + self.op = op + self.target = target + + @classmethod + def call_function(cls, target): + return NodeSpec('call_function', target) + + @classmethod + def call_method(cls, target): + return NodeSpec('call_method', target) + + @classmethod + def call_module(cls, target): + return NodeSpec('call_module', target) + + def __hash__(self): + return hash((self.op, self.target)) + + def __eq__(self, other): + if not isinstance(other, NodeSpec): + return NotImplemented + + return self.op == other.op and self.target == other.target + + def __repr__(self): + return repr(self.op) + " " + repr(self.target) + +def get_supported_device_types(): + return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu'] + +def test_only_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for inp in calib_data: + output = model(*inp) + +_default_loss_fn = torch.nn.CrossEntropyLoss() +def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn): + r""" + Default train function takes a torch.utils.data.Dataset and train the model + on the dataset + """ + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + train_loss, correct, total = 0, 0, 0 + for i in range(10): + 
class AverageMeter:
    """Computes and stores the average and current value.

    Maintains a running sum/count so that ``avg`` always reflects the mean of
    every value seen since the last ``reset()``.
    """

    def __init__(self, name, fmt=':f'):
        # name: label used in __str__; fmt: format spec applied to val/avg.
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        A list of 1-element tensors, one per k, each holding the top-k
        accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Fix: use reshape, not view. The comparison against a
            # transposed/expanded tensor is not guaranteed to produce a
            # contiguous result, and `.view(-1)` raises a RuntimeError on a
            # non-contiguous slice for k > 1 (same fix as upstream's ImageNet
            # reference recipe).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
    """Run up to ``ntrain_batches`` training steps of ``model`` over ``data_loader``.

    Performs the standard forward / loss / backward / step cycle on ``device``
    and returns early (returning None) once the batch budget is exhausted.
    """
    model.train()
    cnt = 0
    for image, target in data_loader:
        print('.', end='')
        cnt += 1
        image, target = image.to(device), target.to(device)
        output = model(image)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Computed for parity with the reference training recipe; the values
        # are not otherwise consumed here.
        accuracy(output, target, topk=(1, 5))
        if cnt >= ntrain_batches:
            return
    return
dist.init_process_group("gloo", rank=rank, world_size=world_size) + +def ddp_cleanup(): + dist.destroy_process_group() + +def run_ddp(rank, world_size, prepared): + ddp_setup(rank, world_size) + prepared.cuda() + prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank]) + prepared.to(rank) + model_with_ddp = prepared + optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001) + train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1) # noqa: F821 + ddp_cleanup() + + +def convert_dynamic(module): + convert(module, get_default_dynamic_quant_module_mappings(), inplace=True) + +def prepare_dynamic(model, qconfig_dict=None): + propagate_qconfig_(model, qconfig_dict) + +def _make_conv_test_input( + batch_size, in_channels_per_group, input_feature_map_size, + out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale, + W_zero_point, use_bias, use_channelwise, +): + in_channels = in_channels_per_group * groups + out_channels = out_channels_per_group * groups + + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, X_value_max, + (batch_size, in_channels,) + input_feature_map_size) + X = X_scale * (X_init - X_zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8) + + W_scale = W_scale * out_channels + W_zero_point = W_zero_point * out_channels + # Resize W_scale and W_zero_points arrays equal to out_channels + W_scale = W_scale[:out_channels] + W_zero_point = W_zero_point[:out_channels] + # For testing, we use small values for weights and for activations so that + # no overflow occurs in vpmaddubsw instruction. If the overflow occurs in + # qconv implementation and if there is no overflow. + # In reference we can't exactly match the results with reference. + # Please see the comment in qconv implementation file + # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details. 
+ (W_value_min, W_value_max) = (-5, 5) + # The operator expects them in the format + # (out_channels, in_channels/groups,) + kernel_size + W_init = torch.randint( + W_value_min, W_value_max, + (out_channels, in_channels_per_group,) + kernel_size) + b_init = torch.randint(0, 10, (out_channels,)) + + if use_channelwise: + W_shape = (-1, 1) + (1,) * len(kernel_size) + W_scales_tensor = torch.tensor(W_scale, dtype=torch.float) + W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float) + W = W_scales_tensor.reshape(*W_shape) * ( + W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float() + b = X_scale * W_scales_tensor * b_init.float() + W_q = torch.quantize_per_channel( + W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0, + dtype=torch.qint8) + else: + W = W_scale[0] * (W_init - W_zero_point[0]).float() + b = X_scale * W_scale[0] * b_init.float() + W_q = torch.quantize_per_tensor( + W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8) + + return (X, X_q, W, W_q, b if use_bias else None) + +def _make_conv_add_extra_input_tensor(scale, zero_point, sizes): + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, + X_value_max, + sizes # Infer the size of tensor to do the add + ) + X = scale * (X_init - zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=scale, zero_point=zero_point, dtype=torch.quint8) + return X, X_q + +def skipIfNoFBGEMM(fn): + reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.' 
+ if isinstance(fn, type): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoQNNPACK(fn): + reason = 'Quantized operations require QNNPACK.' + if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.onnx._CAFFE2_ATEN_FALLBACK: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def withQNNPACKBackend(fn): + # TODO(future PR): consider combining with skipIfNoQNNPACK, + # will require testing of existing callsites + reason = 'Quantized operations require QNNPACK.' + if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + with override_quantized_engine('qnnpack'): + fn(*args, **kwargs) + + return wrapper + +def skipIfNoONEDNN(fn): + reason = 'Quantized operations require ONEDNN.' 
+ if isinstance(fn, type): + if 'onednn' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'onednn' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoONEDNNBF16(fn): + reason = 'Quantized operations require BF16 support.' + if isinstance(fn, type): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoX86(fn): + reason = 'Quantized operations require X86.' + if isinstance(fn, type): + if 'x86' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'x86' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoDynamoSupport(fn): + reason = "dynamo doesn't support." + if isinstance(fn, type): + if not torchdynamo.is_dynamo_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_dynamo_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoInductorSupport(fn): + reason = "inductor doesn't support." 
+ if isinstance(fn, type): + if not torchdynamo.is_inductor_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_inductor_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +try: + import torchvision # noqa: F401 + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False +skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") + +def get_script_module(model, tracing, data): + return torch.jit.trace(model, data) if tracing else torch.jit.script(model) + +def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True): + """ + Convert lengths to offsets for embedding_bag + """ + tt = np.zeros((t.shape[0] + 1,), dtype=offset_type) + tt[1:] = t + tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type)) + if use_begin_offset: + return tt[:-1] + return tt[1:] + + +def _group_quantize_tensor(w, n_bit=4, q_group_size=16): + assert w.dim() == 2 + w = w.transpose(0, 1).contiguous() + assert q_group_size > 1 + assert w.shape[-1] % q_group_size == 0 + + to_quant = w.reshape(-1, q_group_size) + assert torch.isnan(to_quant).sum() == 0 + + max_val = to_quant.amax(dim=1, keepdim=True) + min_val = to_quant.amin(dim=1, keepdim=True) + max_int = 2 ** n_bit - 1 + min_int = 0 + scales = (max_val - min_val).clamp(min=1e-6) / max_int + assert torch.isnan(scales).sum() == 0 + + zeros = min_val + scales * (2 ** (n_bit - 1)) + assert torch.isnan(zeros).sum() == 0 + + out = to_quant.sub(min_val).div(scales).round().clamp_(min_int, max_int) + assert torch.isnan(out).sum() == 0 + + out = out.to(dtype=torch.int32).reshape(w.shape) + + # Scales and zeros for the same q-group should be contiguous, so we can + # load as a 32-bit word + scales = scales.view(w.shape[0], -1) + zeros = zeros.view(w.shape[0], -1) + scales_and_zeros = ( + torch.cat( + [ + scales.reshape(scales.size(0), 
scales.size(1), 1), + zeros.reshape(zeros.size(0), zeros.size(1), 1), + ], + 2, + ).transpose(0, 1).contiguous() + ) + + return out, scales_and_zeros + + +def _dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype): + # source: https://github.com/pytorch-labs/gpt-fast/blob/main/quantize.py + # default setup for affine quantization of activations + x_dtype = x.dtype + x = x.float() + eps = torch.finfo(torch.float32).eps + + # get min and max + min_val, max_val = torch.aminmax(x, dim=1) + + # calculate scales and zero_points based on min and max + # reference: https://fburl.com/code/srbiybme + min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) + max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + device = min_val_neg.device + + # reference: https://fburl.com/code/4wll53rk + max_val_pos = torch.max(-min_val_neg, max_val_pos) + scales = max_val_pos / (float(quant_max - quant_min) / 2) + # ensure scales is the same dtype as the original tensor + scales = torch.clamp(scales, min=eps).to(x.dtype) + zero_points = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device) + + # quantize based on qmin/qmax/scales/zp + x_div = x / scales.unsqueeze(-1) + x_round = torch.round(x_div) + x_zp = x_round + zero_points.unsqueeze(-1) + quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype) + + return quant, scales.to(x_dtype), zero_points + + + +# QuantizationTestCase used as a base class for testing quantization on modules +class QuantizationTestCase(TestCase): + def setUp(self): + super().setUp() + self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)] + self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)] + self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, 
dtype=torch.float)] + for _ in range(2)] + self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + + self.img_data_dict = {1 : self.img_data_1d, + 2 : self.img_data_2d, + 3 : self.img_data_3d} + + # Quant types that produce statically quantized ops + self.static_quant_types = [QuantType.STATIC, QuantType.QAT] + # All quant types for (fx based) graph mode quantization + self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT] + + def checkNoPrepModules(self, module): + r"""Checks the module does not contain child + modules for quantization preparation, e.g. + quant, dequant and observer + """ + self.assertFalse(hasattr(module, 'quant')) + self.assertFalse(hasattr(module, 'dequant')) + + def checkNoQconfig(self, module): + r"""Checks the module does not contain qconfig + """ + self.assertFalse(hasattr(module, 'qconfig')) + + for child in module.children(): + self.checkNoQconfig(child) + + def checkHasPrepModules(self, module): + r"""Checks the module contains child + modules for quantization preparation, e.g. 
+ quant, dequant and observer + """ + self.assertTrue(hasattr(module, 'module')) + self.assertTrue(hasattr(module, 'quant')) + self.assertTrue(hasattr(module, 'dequant')) + + def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None): + r"""Checks the module or module's leaf descendants + have observers in preparation for quantization + """ + if propagate_qconfig_list is None: + propagate_qconfig_list = get_default_qconfig_propagation_list() + if prepare_custom_config_dict is None: + prepare_custom_config_dict = {} + float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {}) + + # check if a module is a leaf module, ignoring activation_post_process attribute + def is_leaf_module(module): + submodule_name_count = 0 + for name, _ in module.named_children(): + if name != 'activation_post_process': + submodule_name_count += 1 + return submodule_name_count == 0 + + if hasattr(module, 'qconfig') and module.qconfig is not None and \ + ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential) + and type(module) in propagate_qconfig_list) or + type(module) in float_to_observed_module_class_mapping.keys()) and \ + not isinstance(module, torch.ao.quantization.DeQuantStub): + self.assertTrue(hasattr(module, 'activation_post_process'), + 'module: ' + str(type(module)) + ' do not have observer') + # we don't need to check observers for child modules of the + # qat modules + if type(module) not in get_default_qat_module_mappings().values() and \ + type(module) not in float_to_observed_module_class_mapping.values() and \ + not isinstance(module, _FusedModule): + for child in module.children(): + if type(child) in [nn.Dropout]: + continue + self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict) + + def checkQuantDequant(self, mod): + r"""Checks that mod has nn.Quantize and + nn.DeQuantize submodules inserted + """ + self.assertEqual(type(mod.quant), 
nnq.Quantize) + self.assertEqual(type(mod.dequant), nnq.DeQuantize) + + def checkWrappedQuantizedLinear(self, mod): + r"""Checks that mod has been swapped for an nnq.Linear + module, the bias is qint32, and that the module + has Quantize and DeQuantize submodules + """ + self.assertEqual(type(mod.module), nnq.Linear) + self.checkQuantDequant(mod) + + def checkQuantizedLinear(self, mod): + self.assertEqual(type(mod), nnq.Linear) + + def checkDynamicQuantizedLinear(self, mod, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. + """ + self.assertEqual(type(mod), nnqd.Linear) + self.assertEqual(mod._packed_params.dtype, dtype) + + def checkDynamicQuantizedLinearRelu(self, mod, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. + """ + self.assertEqual(type(mod), nniqd.LinearReLU) + self.assertEqual(mod._packed_params.dtype, dtype) + + def check_eager_serialization(self, ref_model, loaded_model, x): + # Check state dict serialization and torch.save APIs + model_dict = ref_model.state_dict() + b = io.BytesIO() + torch.save(model_dict, b) + b.seek(0) + loaded_dict = torch.load(b) + loaded_model.load_state_dict(loaded_dict) + ref_out = ref_model(*x) + load_out = loaded_model(*x) + + def check_outputs(ref_out, load_out): + self.assertEqual(ref_out[0], load_out[0]) + if isinstance(ref_out[1], tuple): + self.assertEqual(ref_out[1][0], load_out[1][0]) + self.assertEqual(ref_out[1][1], load_out[1][1]) + else: + self.assertEqual(ref_out[1], load_out[1]) + + check_outputs(ref_out, load_out) + b = io.BytesIO() + torch.save(ref_model, b) + b.seek(0) + loaded = torch.load(b) + load_out = loaded(*x) + check_outputs(ref_out, load_out) + + def check_weight_bias_api(self, ref_model, weight_keys, bias_keys): + weight = ref_model.get_weight() + bias = ref_model.get_bias() + self.assertEqual(weight_keys ^ weight.keys(), set()) + self.assertEqual(bias_keys ^ bias.keys(), set()) + + def 
checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.LSTM type + module, the bias is float. + """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkLinear(self, mod): + self.assertEqual(type(mod), torch.nn.Linear) + + def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. + """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + if hasattr(mod, '_all_weight_values'): + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkScriptable(self, orig_mod, calib_data, check_save_load=False): + scripted = torch.jit.script(orig_mod) + self._checkScriptable(orig_mod, scripted, calib_data, check_save_load) + + # Use first calib_data entry as trace input + traced = torch.jit.trace(orig_mod, calib_data[0]) + self._checkScriptable(orig_mod, traced, calib_data, check_save_load) + + # Call this twice: once for a scripted module and once for a traced module + def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load): + self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data) + + # Test save/load + buffer = io.BytesIO() + torch.jit.save(script_mod, buffer) + + buffer.seek(0) + loaded_mod = torch.jit.load(buffer) + # Pending __get_state_ and __set_state__ support + # See tracking task https://github.com/pytorch/pytorch/issues/23984 + if check_save_load: + self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data) + + def _checkModuleCorrectnessAgainstOrig(self, 
orig_mod, test_mod, calib_data): + for inp in calib_data: + ref_output = orig_mod(*inp) + scripted_output = test_mod(*inp) + self.assertEqual(scripted_output, ref_output) + + + def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False, + check=True, eval_mode=True, dynamic=False, qconfig=None): + if debug: + print('Testing:', str(module)) + qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)} + + if eval_mode: + module = module.eval() + if dynamic: + qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig} + model = get_script_module(module, tracing, inputs[0]).eval() + if debug: + print('input graph:', model.graph) + models = {} + outputs = {} + for debug in [True, False]: + if dynamic: + models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug) + # make sure it runs + outputs[debug] = models[debug](inputs) + else: + # module under test can contain in-place ops, and we depend on + # input data staying constant for comparisons + inputs_copy = copy.deepcopy(inputs) + models[debug] = quantize_jit( + model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False, + debug=debug) + # make sure it runs + outputs[debug] = models[debug](*inputs[0]) + + if debug: + print('debug graph:', models[True].graph) + print('non debug graph:', models[False].graph) + + if check: + # debug and non-debug option should have the same numerics + self.assertEqual(outputs[True], outputs[False]) + + # non debug graph should produce quantized op + FileCheck().check(quantized_op) \ + .run(models[False].graph) + + return models[False] + + def checkGraphModuleNodes( + self, graph_module, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None): + """ Check if GraphModule contains the target node + Args: + graph_module: the GraphModule instance we want to check + expected_node, expected_node_occurrence, expected_node_list: + see docs for checkGraphModeFxOp + """ + nodes_in_graph = {} + 
node_list = [] + modules = dict(graph_module.named_modules(remove_duplicate=False)) + for node in graph_module.graph.nodes: + n = None + if node.op == 'call_function' or node.op == 'call_method': + n = NodeSpec(node.op, node.target) + elif node.op == 'call_module': + n = NodeSpec(node.op, type(modules[node.target])) + + if n is not None: + node_list.append(n) + if n in nodes_in_graph: + nodes_in_graph[n] += 1 + else: + nodes_in_graph[n] = 1 + + if expected_node is not None: + self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) + + ' not found in the graph module') + + if expected_node_occurrence is not None: + for expected_node, occurrence in expected_node_occurrence.items(): + if occurrence != 0: + self.assertTrue( + expected_node in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' not found') + self.assertTrue( + nodes_in_graph[expected_node] == occurrence, + 'Check failed for node:' + str(expected_node) + + ' Expected occurrence:' + str(occurrence) + + ' Found occurrence:' + str(nodes_in_graph[expected_node])) + else: + self.assertTrue( + expected_node not in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' expected no occurrence but found') + + if expected_node_list is not None: + cur_index = 0 + for n in node_list: + if cur_index == len(expected_node_list): + return + if n == expected_node_list[cur_index]: + cur_index += 1 + self.assertTrue( + cur_index == len(expected_node_list), + "Check failed for graph:" + + self.printGraphModule(graph_module, print_str=False) + + "Expected ordered list:" + + str(expected_node_list)) + + def printGraphModule(self, graph_module, print_str=True): + modules = dict(graph_module.named_modules(remove_duplicate=False)) + node_infos = [] + for n in graph_module.graph.nodes: + node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs])) + if n.op == 'call_module': + node_info += ' module type: ' + repr(type(modules[n.target])) + 
node_infos.append(node_info) + str_to_print = '\n'.join(node_infos) + if print_str: + print(str_to_print) + return str_to_print + + if HAS_FX: + + def assert_types_for_matched_subgraph_pairs( + self, + matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]], + expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]], + gm_a: GraphModule, + gm_b: GraphModule, + ) -> None: + """ + Verifies that the types specified in expected_types match + the underlying objects pointed to by the nodes in matched_subgraph_pairs. + + An example successful test case: + + matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)} + expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)} + + The function tests for key equivalence, and verifies types with + instance checks. + """ + + def _get_underlying_op_type( + node: Node, gm: GraphModule + ) -> Union[Callable, str]: + if node.op == 'call_module': + mod = getattr(gm, node.target) + return type(mod) + else: + assert node.op in ('call_function', 'call_method') + return node.target + + self.assertTrue( + len(matched_subgraph_pairs) == len(expected_types), + f'Expected length of results to match, but got {len(matched_subgraph_pairs)} and {len(expected_types)}' + ) + for k, v in expected_types.items(): + expected_types_a, expected_types_b = v + exp_type_start_a, exp_type_end_a = expected_types_a + exp_type_start_b, exp_type_end_b = expected_types_b + subgraph_a, subgraph_b = matched_subgraph_pairs[k] + + act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a) + act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b) + act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a) + act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b) + types_match = (exp_type_start_a is act_type_start_a) and \ + (exp_type_end_a is act_type_end_a) and \ + (exp_type_start_b is act_type_start_b) and \ + (exp_type_end_b is act_type_end_b) + self.assertTrue( + 
types_match, + f'Type mismatch at {k}: expected {(exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b)}, ' + f'got {(act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b)}' + ) + + def assert_ns_compare_dict_valid( + self, + act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]], + ) -> None: + """ + Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid: + 1. for each layer, results are recorded for two models + 2. number of seen tensors match + 3. shapes of each pair of seen tensors match + """ + for layer_name, result_type_to_data in act_compare_dict.items(): + for result_type, layer_data in result_type_to_data.items(): + self.assertTrue( + len(layer_data) == 2, + f"Layer {layer_name} does not have exactly two model results.") + model_name_0, model_name_1 = layer_data.keys() + for res_idx in range(len(layer_data[model_name_0])): + layer_data_0 = layer_data[model_name_0][res_idx] + layer_data_1 = layer_data[model_name_1][res_idx] + self.assertTrue( + layer_data_0['type'] == layer_data_0['type'], + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.") + + self.assertTrue( + len(layer_data_0['values']) == + len(layer_data_1['values']), + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.") + + # F.conv1d weight has rank 3, and toq.conv1d unpacked weight + # has rank 4. For now, skip the length check for conv1d only. 
+ is_weight_functional_conv1d = ( + result_type == NSSingleResultValuesType.WEIGHT.value and + ( + 'conv1d' in layer_data_0['prev_node_target_type'] or + 'conv1d' in layer_data_1['prev_node_target_type'] + ) + ) + if not is_weight_functional_conv1d: + for idx in range(len(layer_data_0['values'])): + values_0 = layer_data_0['values'][idx] + values_1 = layer_data_1['values'][idx] + if isinstance(values_0, torch.Tensor): + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + elif isinstance(values_0, list): + values_0 = values_0[0] + values_1 = values_1[0] + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + else: + assert isinstance(values_0, tuple), \ + f"unhandled type {type(values_0)}" + assert len(values_0) == 2 + assert len(values_0[1]) == 2 + assert values_0[0].shape == values_1[0].shape + assert values_0[1][0].shape == values_1[1][0].shape + assert values_0[1][1].shape == values_1[1][1].shape + + # verify that ref_node_name is valid + ref_node_name_0 = layer_data_0['ref_node_name'] + ref_node_name_1 = layer_data_1['ref_node_name'] + prev_node_name_0 = layer_data_0['prev_node_name'] + prev_node_name_1 = layer_data_1['prev_node_name'] + if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value: + self.assertTrue(ref_node_name_0 == prev_node_name_0) + self.assertTrue(ref_node_name_1 == prev_node_name_1) + elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value: + self.assertTrue(ref_node_name_0 != prev_node_name_0) + self.assertTrue(ref_node_name_1 != prev_node_name_1) + + def checkGraphModeFxOp( + self, + model, + inputs, + quant_type, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None, + is_reference=False, + print_debug_info=False, + custom_qconfig_dict=None, + prepare_expected_node=None, + 
prepare_expected_node_occurrence=None, + prepare_expected_node_list=None, + prepare_custom_config=None, + backend_config=None): + """ Quantizes model with graph mode quantization on fx and check if the + quantized model contains the quantized_node + + Args: + model: floating point torch.nn.Module + inputs: one positional sample input arguments for model + expected_node: NodeSpec + e.g. NodeSpec.call_function(torch.quantize_per_tensor) + expected_node_occurrence: a dict from NodeSpec to + expected number of occurrences (int) + e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1, + NodeSpec.call_method('dequantize'): 1} + expected_node_list: a list of NodeSpec, used to check the order + of the occurrence of Node + e.g. [NodeSpec.call_function(torch.quantize_per_tensor), + NodeSpec.call_module(nnq.Conv2d), + NodeSpec.call_function(F.hardtanh_), + NodeSpec.call_method('dequantize')] + is_reference: if True, enables reference mode + print_debug_info: if True, prints debug info + custom_qconfig_dict: overrides default qconfig_dict + prepare_expected_node: same as expected_node, but for prepare + prepare_expected_node_occurrence: same as + expected_node_occurrence, but for prepare + prepare_expected_node_list: same as expected_node_list, but + for prepare + + Returns: + A dictionary with the following structure: + { + "prepared": ..., # the prepared model + "quantized": ..., # the quantized non-reference model + "quantized_reference": ..., # the quantized reference model + "result": ..., # the result for either quantized or + # quantized_reference model depending on the + # is_reference argument + } + """ + # TODO: make img_data a single example instead of a list + if type(inputs) == list: + inputs = inputs[0] + + if quant_type == QuantType.QAT: + qconfig_mapping = get_default_qat_qconfig_mapping(torch.backends.quantized.engine) + model.train() + elif quant_type == QuantType.STATIC: + qconfig_mapping = get_default_qconfig_mapping(torch.backends.quantized.engine) + 
model.eval() + else: + qconfig = default_dynamic_qconfig + qconfig_mapping = QConfigMapping().set_global(qconfig) + model.eval() + + if quant_type == QuantType.QAT: + prepare = prepare_qat_fx + else: + prepare = prepare_fx + + # overwrite qconfig_dict with custom_qconfig_dict + if custom_qconfig_dict is not None: + assert type(custom_qconfig_dict) in (QConfigMapping, dict), \ + 'custom_qconfig_dict should be a QConfigMapping or a dict' + if isinstance(custom_qconfig_dict, QConfigMapping): + qconfig_mapping = custom_qconfig_dict + else: + qconfig_mapping = QConfigMapping.from_dict(custom_qconfig_dict) + prepared = prepare( + model, qconfig_mapping, + example_inputs=inputs, + prepare_custom_config=prepare_custom_config, + backend_config=backend_config) + if not quant_type == QuantType.DYNAMIC: + prepared(*inputs) + + if print_debug_info: + print() + print('quant type:\n', quant_type) + print('original model:\n', model) + print() + print('prepared model:\n', prepared) + + self.checkGraphModuleNodes( + prepared, prepare_expected_node, + prepare_expected_node_occurrence, prepare_expected_node_list) + + prepared_copy = copy.deepcopy(prepared) + qgraph = convert_fx(copy.deepcopy(prepared)) + qgraph_reference = convert_to_reference_fx(copy.deepcopy(prepared)) + result = qgraph(*inputs) + result_reference = qgraph_reference(*inputs) + qgraph_copy = copy.deepcopy(qgraph) + qgraph_reference_copy = copy.deepcopy(qgraph_reference) + + qgraph_to_check = qgraph_reference if is_reference else qgraph + if print_debug_info: + print() + print('quantized model:\n', qgraph_to_check) + self.printGraphModule(qgraph_to_check) + print() + self.checkGraphModuleNodes( + qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list) + return {"prepared": prepared_copy, + "quantized": qgraph_copy, + "quantized_reference": qgraph_reference_copy, + "quantized_output": result, + "quantized_reference_output": result_reference} + + + def checkEmbeddingSerialization(self, qemb, 
    def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets,
                                    set_qconfig, is_emb_bag, dtype=torch.quint8):
        """End-to-end checks for a quantized Embedding / EmbeddingBag module:
        state_dict round-trip, eager serialization, JIT scripting, and
        `from_float` conversion.  `is_emb_bag` selects EmbeddingBag vs.
        Embedding; `offsets` is only used in the EmbeddingBag case.
        """
        # Test serialization of dynamic EmbeddingBag module using state_dict
        if is_emb_bag:
            inputs = [indices, offsets]
        else:
            inputs = [indices]
        emb_dict = qemb.state_dict()
        b = io.BytesIO()
        torch.save(emb_dict, b)
        b.seek(0)  # rewind before loading the buffer back
        loaded_dict = torch.load(b)
        embedding_unpack = torch.ops.quantized.embedding_bag_unpack
        # Check unpacked weight values explicitly
        for key in emb_dict:
            if isinstance(emb_dict[key], torch._C.ScriptObject):
                # Packed params serialize as ScriptObjects; compare their
                # unpacked weights rather than the opaque objects.
                assert isinstance(loaded_dict[key], torch._C.ScriptObject)
                emb_weight = embedding_unpack(emb_dict[key])
                loaded_weight = embedding_unpack(loaded_dict[key])
                self.assertEqual(emb_weight, loaded_weight)

        # Check state dict serialization and torch.save APIs
        if is_emb_bag:
            loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
                                           include_last_offset=True, mode='sum', dtype=dtype)
        else:
            loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
        self.check_eager_serialization(qemb, loaded_qemb, inputs)

        loaded_qemb.load_state_dict(loaded_dict)
        self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight),
                         embedding_unpack(loaded_qemb._packed_params._packed_weight))


        # Test JIT serialization
        self.checkScriptable(qemb, [inputs], check_save_load=True)

        # Test from_float call
        if is_emb_bag:
            float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
                                                    include_last_offset=True, scale_grad_by_freq=False, mode='sum')
        else:
            float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)

        if set_qconfig:
            # Per-channel float-qparams observer is required for quantized
            # embedding weights.
            float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
                                                                        qscheme=torch.per_channel_affine_float_qparams,
                                                                        ch_axis=0)
            float_embedding.qconfig = QConfig(activation=default_dynamic_quant_observer,
                                              weight=float_qparams_observer)

        prepare_dynamic(float_embedding)

        float_embedding(*inputs)
        if is_emb_bag:
            q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding)
            expected_name = "QuantizedEmbeddingBag"
        else:
            q_embeddingbag = nnq.Embedding.from_float(float_embedding)
            expected_name = "QuantizedEmbedding"

        q_embeddingbag(*inputs)

        self.assertTrue(expected_name in str(q_embeddingbag))

class QuantizationLiteTestCase(QuantizationTestCase):
    """Test-case base for quantized models run on the lite (mobile) interpreter."""

    def _create_quantized_model(self, model_class: Type[torch.nn.Module], **kwargs):
        # Creates quantized model for testing mobile script modules
        qengine = "qnnpack"
        with override_quantized_engine(qengine):
            qconfig = torch.ao.quantization.get_default_qconfig(qengine)
            model = model_class(**kwargs)
            model = quantize(model, test_only_eval_fn, [self.calib_data])

        return model

    def _compare_script_and_mobile(self,
                                   model: torch.nn.Module,
                                   input: torch.Tensor):
        # Compares the numerical outputs for script and lite modules
        qengine = "qnnpack"
        with override_quantized_engine(qengine):
            script_module = torch.jit.script(model)
            script_module_result = script_module(input)

            max_retry = 5
            for retry in range(1, max_retry + 1):
                # retries `max_retry` times; breaks iff succeeds else throws exception
                try:
                    buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
                    buffer.seek(0)
                    mobile_module = _load_for_lite_interpreter(buffer)

                    mobile_module_result = mobile_module(input)

                    torch.testing.assert_close(script_module_result, mobile_module_result)
                    mobile_module_forward_result = mobile_module.forward(input)
                    torch.testing.assert_close(script_module_result, mobile_module_forward_result)

                    mobile_module_run_method_result = mobile_module.run_method("forward", input)
                    torch.testing.assert_close(script_module_result, mobile_module_run_method_result)
                except AssertionError as e:
                    if retry == max_retry:
                        raise e
                    else:
                        continue
                # Only reached when the try-block succeeded (the except path
                # either re-raises or `continue`s past this break).
                break
class PT2EQuantizationTestCase(QuantizationTestCase):
    """
    Base QuantizationTestCase for PT2 with some helper methods.
    """
    # Maps decomposed quantize/dequantize ops to the overloads produced when
    # the FX-quantized model is re-traced with capture_pre_autograd_graph, so
    # occurrence counts can be compared across the two flows.
    _MAP_TO_FX_TRACED_OPS = {
        torch.ops.quantized_decomposed.quantize_per_tensor: torch.ops.quantized_decomposed.quantize_per_tensor.default,
        torch.ops.quantized_decomposed.dequantize_per_tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.default,
        torch.ops.quantized_decomposed.quantize_per_channel: torch.ops.quantized_decomposed.quantize_per_channel.default,
        torch.ops.quantized_decomposed.dequantize_per_channel: torch.ops.quantized_decomposed.dequantize_per_channel.default,
        torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor.tensor,
        torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
    }

    def _test_quantizer(
        self,
        model,
        example_inputs,
        quantizer,
        expected_node_occurrence,
        expected_node_list=None,
        check_against_fx_quant=False,
        fx_qconfig_mapping=None,
        export_with_dynamic_shape=False,
        is_qat=False,
        is_debug_mode=False,
    ):
        """Capture, prepare, calibrate and convert `model` with `quantizer`,
        then verify the converted graph's node occurrences / ordering; when
        `check_against_fx_quant` is set, also compare numerics and node counts
        against the FX reference-decomposed quantization flow.
        """
        # resetting dynamo cache
        torch._dynamo.reset()
        m_eager = model.eval()

        # program capture
        m = copy.deepcopy(m_eager)
        # Only the first positional input gets a dynamic batch dimension.
        dynamic_shapes = tuple(
            {0: torch.export.Dim("dim")} if i == 0 else None
            for i in range(len(example_inputs))
        )
        m = capture_pre_autograd_graph(
            m,
            example_inputs,
            dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None,
        )

        if is_qat:
            m = prepare_qat_pt2e(m, quantizer)
        else:
            m = prepare_pt2e(m, quantizer)
        # Calibrate
        m(*example_inputs)
        m = convert_pt2e(m)
        if is_debug_mode:
            print("quantized model", m)

        pt2_quant_output = m(*example_inputs)
        ns = NodeSpec
        node_occurrence = {
            ns.call_function(k): v for k, v in expected_node_occurrence.items()
        }
        if expected_node_list is None:
            expected_node_list = []
        node_list = [ns.call_function(n) for n in expected_node_list]
        self.checkGraphModuleNodes(
            m, expected_node_occurrence=node_occurrence, expected_node_list=node_list
        )
        if check_against_fx_quant:
            qconfig_mapping = fx_qconfig_mapping
            backend_config = get_executorch_backend_config()
            m_copy = copy.deepcopy(m_eager)
            m_fx = prepare_fx(
                m_copy, qconfig_mapping, example_inputs, backend_config=backend_config
            )
            m_fx(*example_inputs)
            m_fx = _convert_to_reference_decomposed_fx(
                m_fx, backend_config=backend_config
            )
            # Re-trace the FX-quantized model so both graphs use the same
            # (decomposed, overload-resolved) op set before comparing.
            m_fx = capture_pre_autograd_graph(
                m_fx,
                example_inputs,
                dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None,
            )
            node_occurrence = {}
            for k, v in PT2EQuantizationTestCase._MAP_TO_FX_TRACED_OPS.items():
                if k in expected_node_occurrence:
                    node_occurrence[ns.call_function(v)] = expected_node_occurrence[k]
            self.checkGraphModuleNodes(m_fx, expected_node_occurrence=node_occurrence)
            fx_quant_output = m_fx(*example_inputs)
            self.assertEqual(fx_quant_output, pt2_quant_output)

    def _quantize(self, m, quantizer, example_inputs, is_qat: bool = False):
        """Run the full PT2E flow (capture -> prepare -> calibrate -> convert)
        and return the converted module."""
        # resetting dynamo cache
        torch._dynamo.reset()

        m = capture_pre_autograd_graph(
            m,
            example_inputs,
        )
        if is_qat:
            m = prepare_qat_pt2e(m, quantizer)
        else:
            m = prepare_pt2e(m, quantizer)
        m(*example_inputs)
        m = convert_pt2e(m)
        return m

    def _get_pt2e_quantized_linear(self, is_per_channel=False) -> torch.fx.GraphModule:
        """Convenience helper: a single 2x2 Linear quantized via XNNPACKQuantizer."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(2, 2)

            def forward(self, x):
                return self.linear(x)

        quantizer = XNNPACKQuantizer()
        operator_config = get_symmetric_quantization_config(is_per_channel=is_per_channel)
        quantizer.set_global(operator_config)
        example_inputs = (torch.randn(2, 2),)
        m = M().eval()
        return self._quantize(m, quantizer, example_inputs)

# Below are a series of toy models to use in testing quantization
class SingleLayerLinearModel(torch.nn.Module):
    """One float Linear(5, 5); the smallest static-quantization target."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class AnnotatedSingleLayerLinearModel(torch.nn.Module):
    """Same as SingleLayerLinearModel but pre-annotated: qconfig set and the
    Linear wrapped in QuantWrapper for eager-mode quantization."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))

    def forward(self, x):
        x = self.fc1(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class SingleLayerLinearDynamicModel(torch.nn.Module):
    """Single Linear annotated with a static qconfig; used in dynamic-quant tests."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class LinearAddModel(nn.Module):
    """Linear -> add-constant -> Linear; exercises quantization of torch.add."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.add(x, 5)
        x = self.fc2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class RNNDynamicModel(torch.nn.Module):
    """GRU or LSTM (selected by `mod_type`) with a dynamic qconfig."""
    def __init__(self, mod_type):
        super().__init__()
        self.qconfig = default_dynamic_qconfig
        if mod_type == 'GRU':
            self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
        if mod_type == 'LSTM':
            self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)

    def forward(self, x):
        x = self.mod(x)
        return x

class RNNCellDynamicModel(torch.nn.Module):
    """Single RNN cell (GRUCell/LSTMCell/RNNCell relu|tanh) with dynamic qconfig."""
    def __init__(self, mod_type):
        super().__init__()
        self.qconfig = default_dynamic_qconfig
        if mod_type == 'GRUCell':
            self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)
        if mod_type == 'LSTMCell':
            self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)
        if mod_type == 'RNNReLU':
            self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float)
        if mod_type == 'RNNTanh':
            self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float)

    def forward(self, x):
        x = self.mod(x)
        return x

class LSTMwithHiddenDynamicModel(torch.nn.Module):
    """LSTM that threads an explicit hidden state through forward."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float)

    def forward(self, x, hid):
        x, hid = self.lstm(x, hid)
        return x, hid

class ConvModel(torch.nn.Module):
    """Single bias-free Conv2d(3 -> 5, k=3)."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)

    def forward(self, x):
        x = self.conv(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class ConvTransposeModel(torch.nn.Module):
    """Single bias-free ConvTranspose2d(3 -> 5, k=3)."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)

    def forward(self, x):
        x = self.conv(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class AnnotatedConvModel(torch.nn.Module):
    """ConvModel with qconfig plus explicit Quant/DeQuant stubs around forward."""
    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.dequant(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class AnnotatedConvTransposeModel(torch.nn.Module):
    """ConvTransposeModel with qconfig plus Quant/DeQuant stubs."""
    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.dequant(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)
class ConvBnModel(torch.nn.Module):
    """Conv2d -> BatchNorm2d; fusion target without annotations."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class AnnotatedConvBnModel(torch.nn.Module):
    """Conv2d -> BatchNorm2d with qconfig and Quant/DeQuant stubs."""
    def __init__(self):
        super().__init__()
        self.qconfig = default_qconfig
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.dequant(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class ConvBnReLUModel(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU(inplace); three-way fusion target."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class AnnotatedConvBnReLUModel(torch.nn.Module):
    """Conv-BN-ReLU with qconfig, stubs, and a fuse_model() helper."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=True)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.dequant(x)
        return x

    def fuse_model(self):
        # TODO: remove this check and define two fuse_modules function on this module
        if self.training:
            torch.ao.quantization.fuse_modules_qat(self, [['conv', 'bn', 'relu']], inplace=True)
        else:
            torch.ao.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True)

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class TwoLayerConvModel(torch.nn.Module):
    """Two stacked bias-free Conv2d layers."""
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class TwoLayerLinearModel(torch.nn.Module):
    """Two stacked float Linear layers (5 -> 8 -> 5)."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class LinearModelWithSubmodule(nn.Module):
    """TwoLayerLinearModel as a submodule followed by one more Linear."""
    def __init__(self):
        super().__init__()
        self.subm = TwoLayerLinearModel()
        self.fc = nn.Linear(5, 5)

    def forward(self, x):
        x = self.subm(x)
        x = self.fc(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.subm.get_example_inputs()

class AnnotatedTwoLayerLinearModel(torch.nn.Module):
    """Two Linears where only the second (wrapped) layer carries a qconfig."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
        self.fc2.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)
class ActivationsTestModel(torch.nn.Module):
    """Hardswish and ELU between Quant/DeQuant stubs; activation-op coverage."""
    def __init__(self):
        super().__init__()
        self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
        self.quant = torch.ao.quantization.QuantStub()
        self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)
        self.elu = torch.nn.ELU().to(dtype=torch.float)
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.hardswish(x)
        x = self.elu(x)
        x = self.dequant(x)
        return x

class LinearReluModel(torch.nn.Module):
    """Linear followed by ReLU; the basic linear-relu fusion pattern."""
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc(x))
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)


class LinearReluLinearModel(torch.nn.Module):
    """Linear -> ReLU -> Linear chain."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class LinearReluAddModel(torch.nn.Module):
    """Linear -> ReLU -> add-constant -> Linear."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = torch.add(x, 5)
        x = self.fc2(x)
        # NOTE(review): re-assigning self.relu inside forward looks accidental,
        # but may be deliberate to exercise module re-assignment during tracing
        # — confirm before changing.
        self.relu = torch.nn.ReLU()
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class LinearBnLeakyReluModel(torch.nn.Module):
    """Linear -> (optional) BatchNorm1d -> LeakyReLU; `with_bn` toggles the BN."""
    def __init__(self, with_bn=True):
        super().__init__()
        self.linear = nn.Linear(5, 5)
        self.bn1d = nn.BatchNorm1d(5)
        self.leaky_relu = nn.LeakyReLU(0.01)
        self.with_bn = with_bn

    def forward(self, x):
        x = self.linear(x)
        if self.with_bn:
            x = self.bn1d(x)
        x = self.leaky_relu(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class LinearTanhModel(torch.nn.Module):
    """Linear followed by Tanh."""
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(5, 5)
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.linear(x)
        x = self.tanh(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class ConvBnAddReluModel(torch.nn.Module):
    """Configurable conv/bn/add/relu graph covering every structural variant of
    the conv-add(-relu) fusion pattern: one vs. two convs, torch.add vs. `+`
    operator, conv on the left vs. right of the add, with/without BN and ReLU."""
    def __init__(self,
                 with_bn=True,
                 with_relu=True,
                 left_conv=True,
                 two_conv=True,
                 use_torch_add=True):
        super().__init__()
        self.conv = nn.Conv2d(5, 5, (2, 2))
        self.conv2 = nn.Conv2d(5, 5, (2, 2))
        self.bn = nn.BatchNorm2d(5)
        self.relu = nn.ReLU()
        self.with_bn = with_bn
        self.with_relu = with_relu
        self.two_conv = two_conv
        self.left_conv = left_conv
        self.use_torch_add = use_torch_add

    def forward(self, x1, x2):
        if self.two_conv:
            # Both add operands come from convs over x1; x2 is unused here.
            if self.use_torch_add:
                if self.with_bn:
                    x = torch.add(self.bn(self.conv(x1)), self.conv2(x1))
                else:
                    x = torch.add(self.conv(x1), self.conv2(x1))
            else:
                if self.with_bn:
                    x = self.bn(self.conv(x1)) + self.conv2(x1)
                else:
                    x = self.conv(x1) + self.conv2(x1)
        else:
            # One conv operand plus the raw x2 tensor; `left_conv` picks which
            # side of the add the conv output appears on.
            if self.use_torch_add:
                if self.left_conv:
                    if self.with_bn:
                        x = torch.add(self.bn(self.conv(x1)), x2)
                    else:
                        x = torch.add(self.conv(x1), x2)
                else:
                    if self.with_bn:
                        x = torch.add(x2, self.bn(self.conv(x1)))
                    else:
                        x = torch.add(x2, self.conv(x1))
            else:
                if self.left_conv:
                    if self.with_bn:
                        x = self.bn(self.conv(x1)) + x2
                    else:
                        x = self.conv(x1) + x2
                else:
                    if self.with_bn:
                        x = x2 + self.bn(self.conv(x1))
                    else:
                        x = x2 + self.conv(x1)
        if self.with_relu:
            x = self.relu(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5, 3, 3), torch.rand(1, 5, 2, 2))
# TODO: self.fc should be self.conv
class ConvReluModel(torch.nn.Module):
    """Conv2d followed by ReLU (attribute misnamed `fc`, see TODO above)."""
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc(x))
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

# TODO: self.fc should be self.conv
class ConvReluConvModel(torch.nn.Module):
    """Conv -> ReLU -> Conv chain (attributes misnamed `fc*`, see TODO)."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

# TODO: self.fc should be self.conv
class ConvReluAddModel(torch.nn.Module):
    """Conv -> ReLU -> add-constant -> Conv (attributes misnamed `fc*`)."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = torch.add(x, 5)
        x = self.fc2(x)
        # NOTE(review): re-assigning self.relu inside forward looks accidental,
        # mirroring LinearReluAddModel — confirm intent before changing.
        self.relu = torch.nn.ReLU()
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class NormalizationTestModel(torch.nn.Module):
    """Covers layer/group/instance norm (1d/2d/3d) after a Linear, with the
    input reshaped via unsqueeze/repeat to satisfy each norm's rank."""
    def __init__(self):
        super().__init__()
        self.quant = torch.ao.quantization.QuantStub()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.layer_norm = torch.nn.LayerNorm(8)
        self.group_norm = torch.nn.GroupNorm(2, 8)
        self.instance_norm1d = torch.nn.InstanceNorm1d(8)
        self.instance_norm2d = torch.nn.InstanceNorm2d(8)
        self.instance_norm3d = torch.nn.InstanceNorm3d(8)

    def forward(self, x):
        x = self.quant(x)
        x = self.fc1(x)
        x = self.layer_norm(x)
        x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3))
        x = self.instance_norm1d(x)
        x = self.instance_norm2d(x.unsqueeze(-1))
        x = self.instance_norm3d(x.unsqueeze(-1))
        return x

class NestedModel(torch.nn.Module):
    """Two submodules plus a Linear; baseline for nested-module quantization."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x

class AnnotatedNestedModel(torch.nn.Module):
    """NestedModel variant with per-submodule qconfigs: fbgemm gets a
    per-channel qconfig on sub2.fc1, other engines the default qconfig."""
    def __init__(self, qengine):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        if qengine == 'fbgemm':
            self.sub2.fc1.qconfig = default_per_channel_qconfig
        else:
            self.sub2.fc1.qconfig = default_qconfig

    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x

class AnnotatedSubNestedModel(torch.nn.Module):
    """NestedModel variant where the whole sub2 module is wrapped/annotated."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig

    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x

class AnnotatedCustomConfigNestedModel(torch.nn.Module):
    """NestedModel variant with a hand-built custom QConfig on sub2's layers."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig

        custom_options = {
            'dtype': torch.quint8,
            'qscheme': torch.per_tensor_affine
        }
        custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
                                 weight=default_weight_observer)
        self.sub2.fc1.qconfig = custom_qconfig

        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        self.sub2.fc2 = QuantWrapper(self.sub2.fc2)

    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class QuantSubModel(torch.nn.Module):
    """Nested model where sub2 is wrapped and both sub2 and fc3 carry qconfigs."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.sub2.qconfig = default_qconfig
        self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.fc3.qconfig = default_qconfig

    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x

class InnerModule(torch.nn.Module):
    """Linear-ReLU-Linear-ReLU chain with a helper that fuses each
    (Linear, ReLU) pair that appears adjacent in the child order."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.relu1 = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
        self.relu2 = torch.nn.ReLU()

    def forward(self, x):
        return self.relu2(self.fc2(self.relu1(self.fc1(x))))

    def fuse_modules(self):
        fusable_layers = []
        named_children = list(self.named_children())
        for idx, (current_name, layer) in enumerate(named_children):
            if isinstance(layer, torch.nn.Linear):
                if idx >= len(named_children) - 1:
                    # Last child: nothing after it to fuse with.
                    break
                if isinstance(named_children[idx + 1][1], torch.nn.ReLU):
                    fusable_layers.append([current_name,
                                           named_children[idx + 1][0]])
        # TODO: remove this check and define two fuse_modules function on this module
        if self.training:
            torch.ao.quantization.fuse_modules_qat(self, fusable_layers, inplace=True)
        else:
            torch.ao.quantization.fuse_modules(self, fusable_layers, inplace=True)

class FunctionalLinear(torch.nn.Module):
    """F.linear with plain-tensor weight/bias (not Parameters); exercises
    functional-op quantization rather than module swapping."""
    def __init__(self):
        super().__init__()
        self.weight = torch.rand((5, 5))
        self.bias = torch.zeros(5)

    def forward(self, x):
        return F.linear(x, self.weight, self.bias)

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 5),)

class SingleLayerFunctionalLinearModel(torch.nn.Module):
    """One FunctionalLinear submodule."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()

    def forward(self, x):
        x = self.linear1(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.linear1.get_example_inputs()

class TwoLayerFunctionalLinearModel(torch.nn.Module):
    """Two stacked FunctionalLinear submodules."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.linear2 = FunctionalLinear()

    def forward(self, x):
        x = self.linear1(x)
        x = self.linear2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.linear1.get_example_inputs()

class FunctionalLinearAddModel(torch.nn.Module):
    """FunctionalLinear -> add-constant -> FunctionalLinear."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.linear2 = FunctionalLinear()

    def forward(self, x):
        x = self.linear1(x)
        x = torch.add(x, 5)
        x = self.linear2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.linear1.get_example_inputs()

class FunctionalLinearReluModel(nn.Module):
    """FunctionalLinear followed by F.relu."""
    def __init__(self):
        super().__init__()
        self.linear = FunctionalLinear()

    def forward(self, x):
        x = self.linear(x)
        x = F.relu(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.linear.get_example_inputs()

class FunctionalLinearReluLinearModel(nn.Module):
    """FunctionalLinear -> ReLU module -> FunctionalLinear."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.relu = nn.ReLU()
        self.linear2 = FunctionalLinear()

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return self.linear1.get_example_inputs()

class FunctionalConv2d(torch.nn.Module):
    """F.conv2d with plain-tensor weight/bias and explicit stride/padding/
    dilation/groups attributes."""
    def __init__(self):
        super().__init__()
        self.weight = torch.rand(3, 3, 3, 3)
        self.bias = torch.rand(3)
        self.stride = (1, 1)
        self.padding = (0, 0)
        self.dilation = (1, 1)
        self.groups = 1

    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def get_example_inputs(self) -> Tuple[Any, ...]:
        return (torch.rand(1, 3, 5, 5),)

class SingleLayerFunctionalConvModel(torch.nn.Module):
    """One FunctionalConv2d submodule."""
    def __init__(self):
        super().__init__()
        self.conv1 = FunctionalConv2d()

    def forward(self, x):
        x = self.conv1(x)
        return x

    def get_example_inputs(self) -> Tuple[Any, ...]:
        # NOTE(review): the original body is truncated in this view right after
        # this def line; assumed to delegate like the sibling models — confirm.
        return self.conv1.get_example_inputs()
return self.conv1.get_example_inputs() + +class TwoLayerFunctionalConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class FunctionalConvReluModel(nn.Module): + def __init__(self): + super().__init__() + self.conv = FunctionalConv2d() + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv.get_example_inputs() + +class FunctionalConvReluConvModel(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.relu = nn.ReLU() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class SkipQuantModel(torch.nn.Module): + r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self): + super().__init__() + self.sub = InnerModule() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.fuse_modules() + +class AnnotatedSkipQuantModel(torch.nn.Module): + r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.sub = QuantWrapper(InnerModule()) + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + # don't quantize this fc + self.fc.qconfig = None + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.module.fuse_modules() + +class QuantStubModel(torch.nn.Module): + r"""A Module with 
manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc(x) + return self.dequant(x) + +class ManualLinearQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualDropoutQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.dropout = torch.nn.Dropout(0.5) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.dropout(x) + return self.dequant(x) + +class ManualLinearDynamicQATModel(torch.nn.Module): + r"""A Module that uses a dynamic QAT by default. 
+ """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig or default_dynamic_qat_qconfig + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + +class ManualConvLinearQATModel(torch.nn.Module): + r"""A module with manually inserted `QuantStub` and `DeQuantStub` + and contains both linear and conv modules + """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig if qconfig else torch.ao.quantization.get_default_qat_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float) + self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = x.view(-1, 64).contiguous() + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualConvLinearSymmQATModel(ManualConvLinearQATModel): + r"""Same as ManualConvLinearQATModule but with Symmetric Quantization. + Supported only with qnnpack. 
+ """ + def __init__(self): + super().__init__(default_symmetric_qnnpack_qat_qconfig) + +class ManualEmbeddingBagLinear(nn.Module): + def __init__(self): + super().__init__() + self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum') + self.emb.qconfig = default_embedding_qat_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None, + per_sample_weights: Optional[torch.Tensor] = None): + x = self.emb(input, offsets, per_sample_weights) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class DeFusedEmbeddingBagLinear(nn.Module): + r"""A module to simulate QAT embedding bag with a linear layer, + this module uses a separate embedding and bagging op, similar + to that which is described in the EmbeddingBag documentation. + + https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html + """ + def __init__(self) -> None: + super().__init__() + self.emb = nn.Embedding(num_embeddings=10, embedding_dim=12) + self.emb.qconfig = default_embedding_qat_qconfig + self.bagging_op = torch.sum + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor) -> torch.Tensor: + x = self.bagging_op(self.emb(input), dim=1) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class SubModelForFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) + self.bn = nn.BatchNorm2d(2).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class SubModelWithoutFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, 
bias=None).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=False).to(dtype=torch.float) + + def forward(self, x): + return self.relu(self.conv(x)) + +class ModelForFusion(nn.Module): + def __init__(self, qconfig): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.sub1 = SubModelForFusion() + self.sub2 = SubModelWithoutFusion() + self.fc = nn.Linear(36, 10).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.qconfig = qconfig + self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float) + self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) + self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float) + self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float) + self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float) + self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float) + # don't quantize sub2 + self.sub2.qconfig = None + self.fc.qconfig = None + + def forward(self, x): + x = x.squeeze(2) + x = self.quant(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu4(x) + x = x.unsqueeze(2) + y = x.unsqueeze(2) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.sub1(x) + x = self.dequant(x) + x = self.sub2(x) + x = x.reshape(-1, 36).contiguous() + x = self.fc(x) + y = self.conv2(y) + y = self.relu2(y) + y = self.bn2(y) + y = self.relu3(y) + y = self.dequant(y) + return x + +class ConvBNReLU(nn.Sequential): + def __init__(self): + super().__init__( + nn.Conv2d(3, 3, 1, 1, bias=False), + nn.BatchNorm2d(3), + nn.ReLU(inplace=False) + ) + +class ModelWithSequentialFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, 1) + self.relu1 = nn.ReLU(inplace=False) + layers = [] + for i in range(3): + layers.append(ConvBNReLU()) + self.features = nn.Sequential(*layers) + 
head = [nn.Linear(300, 10), nn.ReLU(inplace=False)] + self.classifier = nn.Sequential(*head) + self.seq = nn.Sequential() + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.relu1(x) + x = self.features(x) + x = torch.reshape(x, (-1, 3 * 10 * 10)) + x = self.classifier(x) + x = self.seq(x) + x = self.dequant(x) + return x + +class ModelForFusionWithBias(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float) + self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.dequant(x) + return x + +class ModelForLinearBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.fc = nn.Linear(20, 10) + self.bn = nn.BatchNorm1d(10) + nn.init.uniform_(self.bn.weight) + nn.init.uniform_(self.bn.bias) + + def forward(self, x): + return self.bn(self.fc(x)) + +class DummyObserver(torch.nn.Module): + def calculate_qparams(self): + return 1.0, 0 + + def forward(self, x): + return x + + +class ModelForConvTransposeBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.ConvTranspose1d(3, 3, 1) + self.bn1 = nn.BatchNorm1d(3) + self.conv2 = nn.ConvTranspose2d(3, 3, 1) + self.bn2 = nn.BatchNorm2d(3) + self.conv3 = nn.ConvTranspose3d(3, 3, 1) + self.bn3 = nn.BatchNorm3d(3) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = x.unsqueeze(2) + x = self.conv2(x) + x = self.bn2(x) + x = x.unsqueeze(2) + x = self.conv3(x) + x = self.bn3(x) + return x + + +class ModelWithFunctionals(torch.nn.Module): + def 
__init__(self): + super().__init__() + self.mycat = nnq.FloatFunctional() + self.myadd = nnq.FloatFunctional() + self.myadd_relu = nnq.FloatFunctional() + self.mymatmul = nnq.FloatFunctional() + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # self.my_scalar_add = nnq.FloatFunctional() + # self.my_scalar_mul = nnq.FloatFunctional() + + def forward(self, x): + y = self.mycat.cat([x, x, x]) + z = self.myadd.add(y, y) + w = self.myadd_relu.add_relu(z, z) + u = self.mymatmul.matmul(w, w.T) + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # w = self.my_scalar_add.add_scalar(w, -0.5) + # w = self.my_scalar_mul.mul_scalar(w, 0.5) + return u + + +class ResNetBase(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.myop = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = torch.nn.Linear(inplanes, 1) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.myop.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = torch.flatten(out, 1) + out = self.fc(out) + return out + + def fuse_model(self): + # TODO: remove this check and define two fuse_model function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, [['conv1', 'bn1', 'relu1']], inplace=True) + else: + torch.ao.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True) + +class ModelMultipleOps(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), 
bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.skip_add.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +# Model to ensure consistency of fake quant with true quant +# Average pooling and mean operations are not modelled +# accurately with fake-quant so this model does not +# contain those operations +class ModelMultipleOpsNoAvgPool(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.maxpool = nn.MaxPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + skip = self.conv2(x) + out = self.skip_add.add(out, skip) + out = self.relu2(out) + out = self.maxpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +class EmbeddingBagModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, 
embedding_dim=12, + include_last_offset=True, scale_grad_by_freq=False, mode='sum') + + def forward(self, indices, offsets, per_sample_weights): + return self.emb(indices, offsets, per_sample_weights) + +class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + +class EmbeddingWithStaticLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12) + self.fc = torch.nn.Linear(4, 2) + self.emb.qconfig = float_qparams_weight_only_qconfig + self.qconfig = default_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, indices, offsets, linear_in): + emb = self.emb(indices, offsets) + q_x = self.quant(linear_in) + fc = self.fc(q_x) + fc = self.dequant(fc) + features = torch.cat([fc] + [emb], dim=1) + return features + +class DenseTopMLP(nn.Module): + + def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None: + super().__init__() + + self.dense_mlp = nn.Sequential( + nn.Linear(dense_dim, dense_out), + ) + self.top_mlp = nn.Sequential( + nn.Linear(dense_out + embedding_dim, top_out_in), + nn.Linear(top_out_in, top_out_out), + ) + + def forward( + self, + sparse_feature: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + dense_feature = self.dense_mlp(dense) + features = torch.cat([dense_feature] + [sparse_feature], dim=1) + + out = self.top_mlp(features) + return out + +# thin wrapper around embedding bag, because tracing inside nn.Embedding +# bag is not supported at the moment and this is top level +class EmbBagWrapper(nn.Module): + def __init__(self, num_embeddings, embedding_dim): + super().__init__() + self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum') + + def forward(self, indices, offsets): + return self.emb_bag(indices, offsets) + +class 
SparseNNModel(nn.Module): + _NUM_EMBEDDINGS = 10 + _EMBEDDING_DIM = 5 + _DENSE_DIM = 4 + _DENSE_OUTPUT = 2 + _TOP_OUT_IN = 2 + _TOP_OUT_OUT = 2 + _TOP_MLP_DIM = 1 + + def __init__(self) -> None: + super().__init__() + + self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM) + self.dense_top = DenseTopMLP( + self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN, + self._TOP_OUT_OUT) + + def forward( + self, + sparse_indices: torch.Tensor, + sparse_offsets: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + + sparse_feature = self.model_sparse(sparse_indices, sparse_offsets) + out = self.dense_top(sparse_feature, dense) + + return out + +class TestHelperModules: + class Conv2dPropAnnotaton(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.linear = torch.nn.Linear(3, 3) + + def forward(self, x): + x = self.conv(x) + x = x.view(-1, 3) + x = torch.nn.functional.hardtanh(x, -0.5, 0.5) + x = self.linear(x) + return x + + class Conv2dWithObsSharingOps(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.hardtanh = torch.nn.Hardtanh() + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = self.adaptive_avg_pool2d(x) + x = self.hardtanh(x) + x = torch.mean(x) + return x + + class Conv2dWithTwoLinearPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(16, 8, bias=False) + self.linear2 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear2(self.linear1(permute_out)) + + class Conv2dWithTwoLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(64, 8, bias=False) + self.linear2 = 
torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + reshape_out = torch.reshape(conv_out, (2, 64)) + return self.linear2(self.linear1(reshape_out)) + + class ConvLinearWPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 8, 3) + self.linear1 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear1(permute_out) + + class TwoLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = torch.nn.Linear(8, 16, bias=False) + self.linear2 = torch.nn.Linear(16, 8) + + def forward(self, x): + return self.linear2(self.linear1(x)) + + class ConvMaxPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(2, 2, 1) + self.pool = torch.nn.MaxPool2d(1, 1) + + def forward(self, x): + x = self.conv(x) + x = self.pool(x) + return x + + class ConvWithAdaptiveAvgPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = self.adaptive_avg_pool2d(x) + return x + + class ConvWithBNRelu(torch.nn.Module): + def __init__(self, relu, dim=2, bn=True, bias=True): + super().__init__() + convs = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d} + bns = {1: torch.nn.BatchNorm1d, 2: torch.nn.BatchNorm2d} + self.conv = convs[dim](3, 3, 3, bias=bias) + + if bn: + self.bn = bns[dim](3) + else: + self.bn = torch.nn.Identity() + if relu: + self.relu = torch.nn.ReLU() + else: + self.relu = torch.nn.Identity() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + class ConvTWithBNRelu(torch.nn.Module): + def __init__(self, relu, dim=2, bn=True, bias=True): + super().__init__() + convts = {1: torch.nn.ConvTranspose1d, 2: torch.nn.ConvTranspose2d} + bns = {1: torch.nn.BatchNorm1d, 2: 
torch.nn.BatchNorm2d} + self.convt = convts[dim](3, 3, 3, bias=bias) + + if bn: + self.bn = bns[dim](3) + else: + self.bn = torch.nn.Identity() + if relu: + self.relu = torch.nn.ReLU() + else: + self.relu = torch.nn.Identity() + + def forward(self, x): + x = self.convt(x) + x = self.bn(x) + return self.relu(x) + + class Conv2dThenConv1d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1d = torch.nn.Conv1d(3, 3, 3) + self.conv2d = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x): + x = self.conv2d(x) + x = x.squeeze(0) + x = self.conv1d(x) + return x + + def example_inputs(self): + return (torch.randn(1, 3, 5, 5),) + + class Conv2dWithCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x, y): + x = self.conv1(x) + y = self.conv2(y) + z = torch.cat([x, y], dim=1) + return z + + class Conv2dWithTwoCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x1, x2, x3, x4): + x1 = self.conv1(x1) + x2 = self.conv2(x2) + y = torch.cat([x1, x2], dim=1) + z = x3 + x4 + w = torch.cat([z, y]) + return w + + class ThreeAdd(torch.nn.Module): + def forward(self, x1, x2, x3, x4): + y = x1 + x2 + z = x3 + x4 + w = y + z + return w + + class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + + class EmbeddingConvLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=8) + self.conv = torch.nn.Conv2d(8, 16, (1, 3)) + self.linear = torch.nn.Linear(16, 8) + + def forward(self, indices): + embeddings = self.emb(indices) + embeddings = torch.unsqueeze(embeddings, dim=0) + embeddings = torch.permute(embeddings, 
(0, 3, 1, 2)) + conv_out = self.conv(embeddings) + conv_out = torch.permute(conv_out, (0, 2, 3, 1)) + conv_out = torch.squeeze(conv_out, dim=0) + return self.linear(conv_out) + + class AddInplaceAdd(torch.nn.Module): + def forward(self, x, y): + x = x + y + x += y + return x + + class MulInplaceMul(torch.nn.Module): + def forward(self, x, y): + x = x * y + x *= y + return x + + class AddMulScalar(torch.nn.Module): + def forward(self, x): + x = x + 3 + x = x * 3 + x += 3 + x *= 3 + return x + + class ConvBnReLU2dAndLinearReLU(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv_bn_relu = TestHelperModules.ConvWithBNRelu(relu=True) + self.linear = torch.nn.Linear(3, 8, bias=False) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.conv_bn_relu(x) + permute_out = torch.permute(x, (0, 2, 3, 1)) + linear_out = self.linear(permute_out) + return linear_out + + class GroupwiseConv2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(4, 4, 3, groups=2) + + def forward(self, x): + return self.conv(x) + + def example_inputs(self): + return (torch.randn(2, 4, 10, 10),) + + class LinearReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd7b827dde32f0d26380bdbe5766c1eece3da6e --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py @@ -0,0 +1,227 @@ +# mypy: ignore-errors + +r"""Importing this file includes common utility methods for checking quantized +tensors and modules. 
+""" +import numpy as np +import torch +from contextlib import contextmanager +from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS + +supported_qengines = torch.backends.quantized.supported_engines +supported_qengines.remove('none') +# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326 +# QNNPACK is not supported on PPC +# QNNPACK throws ASAN heap-buffer-overflow error. +if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]): + supported_qengines.remove('qnnpack') + +def _conv_output_shape(input_size, kernel_size, padding, stride, dilation, + output_padding=0): + """Computes the output shape given convolution parameters.""" + return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1) + * (dilation - 1)) / stride) + 2 * output_padding + 1 + +# Quantization references +def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): + """Quantizes a numpy array.""" + if qmin is None: + qmin = np.iinfo(dtype).min + if qmax is None: + qmax = np.iinfo(dtype).max + qx = np.round(x / scale + zero_point).astype(np.int64) + qx = np.clip(qx, qmin, qmax) + qx = qx.astype(dtype) + return qx + + +def _dequantize(qx, scale, zero_point): + """Dequantizes a numpy array.""" + x = (qx.astype(float) - zero_point) * scale + return x + + +def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): + """Requantizes a numpy array, i.e., intermediate int32 or int16 values are + converted back to given type""" + qx = (x * multiplier).round() + zero_point + qx = np.clip(qx, qmin, qmax).astype(qtype) + return qx + +def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + assert qscheme in 
(torch.per_tensor_affine, torch.per_tensor_symmetric) + if qscheme == torch.per_tensor_symmetric: + assert dtype == torch.qint8 + if isinstance(X, torch.Tensor): + X = X.numpy() + if dtype == torch.qint8: + if reduce_range: + qmin, qmax = -64, 63 + else: + qmin, qmax = -128, 127 + else: # dtype == torch.quint8 + if reduce_range: + qmin, qmax = 0, 127 + else: + qmin, qmax = 0, 255 + min_val = X.min() + max_val = X.max() + is_symmetric = (qscheme == torch.per_tensor_symmetric) + if min_val == max_val: + scale = 1.0 + zero_point = 0 + else: + if is_symmetric: + max_val = max(max_val, -min_val) + min_val = -max_val + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = qmin - round(min_val / scale) + zero_point = max(qmin, zero_point) + zero_point = min(qmax, zero_point) + return [float(scale), int(zero_point)] + +def _calculate_dynamic_per_channel_qparams(X, dtype): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + if isinstance(X, torch.Tensor): + X = X.numpy() + qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max + n_levels = qmax - qmin + scale = np.zeros(X.shape[0], dtype=np.float64) + zero_point = np.zeros(X.shape[0], dtype=np.int64) + for i in range(zero_point.shape[0]): + min_val = X.min() + max_val = X.max() + if min_val == max_val: + scale[i] = 1.0 + zero_point[i] = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale[i] = (max_val - min_val) / n_levels + scale[i] = max(scale[i], np.finfo(np.float32).eps) + zero_point[i] = qmin - round(min_val / scale[i]) + zero_point[i] = max(qmin, zero_point[i]) + zero_point[i] = min(qmax, zero_point[i]) + + return scale, zero_point + +def _snr(x, x_hat): + """Calculates the signal to 
noise ratio and returns the signal and noise + power, as well as the SNR in dB. + If the input is a list/tuple this function is called recursively on each + element. The result will have the same nested structure as the inputs. + + Args: + x, x_hat: Either a tensor or a nested list/tuple of tensors. + Returns: + signal, noise, SNR(in dB): Either floats or a nested list of floats + """ + if isinstance(x, (list, tuple)): + assert len(x) == len(x_hat) + res = [] + for idx in range(len(x)): + res.append(_snr(x[idx], x_hat[idx])) + return res + if x_hat.is_quantized: + x_hat = x_hat.dequantize() + if x.is_quantized: + x = x.dequantize() + noise = (x - x_hat).norm() + if noise == 0: + return 0.0, float('inf'), float('inf') + signal = x.norm() + snr = signal / noise + snr_db = 20 * snr.log10() + return signal, noise, snr_db + +@contextmanager +def override_quantized_engine(qengine): + previous = torch.backends.quantized.engine + torch.backends.quantized.engine = qengine + try: + yield + finally: + torch.backends.quantized.engine = previous + +@contextmanager +def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack): + try: + if qengine_is_qnnpack: + torch._C._set_default_mobile_cpu_allocator() + yield + finally: + if qengine_is_qnnpack: + torch._C._unset_default_mobile_cpu_allocator() + +# TODO: Update all quantization tests to use this decorator. +# Currently for some of the tests it seems to have inconsistent params +# for fbgemm vs qnnpack. +def override_qengines(qfunction): + def test_fn(*args, **kwargs): + for qengine in supported_qengines: + with override_quantized_engine(qengine): + # qfunction should not return anything. 
+ qfunction(*args, **kwargs) + return test_fn + +def qengine_is_fbgemm(): + return torch.backends.quantized.engine == 'fbgemm' +def qengine_is_qnnpack(): + return torch.backends.quantized.engine == 'qnnpack' +def qengine_is_onednn(): + return torch.backends.quantized.engine == 'onednn' +def qengine_is_x86(): + return torch.backends.quantized.engine == 'x86' + +# Helper function used to simulate per-channel fake-quant against any axis +def _permute_to_axis_zero(X, axis): + new_axis_list = list(range(X.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = X.permute(tuple(new_axis_list)) + return y, new_axis_list + +# Reference method for fake quantize +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + res = torch.zeros_like(X) + + for i in range(X.size()[0]): + res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) + + per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i] + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +# Reference method for the gradient of the fake quantize operator +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + Xq = torch.zeros_like(X) + for i in range(X.size()[0]): + Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]) + Xq = Xq.permute(tuple(permute_axis_list)) + mask = (Xq >= quant_min) * (Xq <= quant_max) + res = torch.zeros_like(dY) 
+ res[mask] = dY[mask] + return res.to(dtype) + +def to_tensor(X, device): + if not isinstance(X, torch.Tensor): + X = torch.tensor(X) + else: + X = X.clone().detach() + return X.to(device=torch.device(device), dtype=torch.float32) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c3bd4a130e08d24bb10845ff5b8774f731ff1b --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py @@ -0,0 +1,581 @@ +# mypy: ignore-errors + +import torch +from torch import Tensor +import itertools + +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from torch.utils import _pytree as pytree +from functools import partial +from torch.utils._mode_utils import no_dispatch, all_same_mode +import torch.autograd.forward_ad as fwAD +from typing import Callable +import re + + +def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor): + elem = wrapper_tensor.elem + metadata_wrapper_tensor = metadata_accessor(wrapper_tensor) + metadata_elem = metadata_accessor(elem) + if metadata_wrapper_tensor == metadata_elem: + return + raise RuntimeError( + f"This operator is not Composite Compliant: the " + f"{metadata_name} of the tensor was modified directly without " + f"going through the PyTorch dispatcher.") + +def check_metadata_consistency(wrapper_tensor, CCT): + # CCT: CompositeCompliantTensor class which is generated using generate_cct + if not isinstance(wrapper_tensor, CCT): + return + things_to_check = { + 'shape': Tensor.size, + 'dtype': lambda x: x.dtype, + 'device': lambda x: x.device, + 'numel': Tensor.numel, + 'stride': Tensor.stride, + 'storage_offset': Tensor.storage_offset, + } + for metadata_name, metadata_accessor in things_to_check.items(): 
+ check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor) + +def is_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided', + 'detach', + 'diagonal', + 'expand', + 'expand_as', + 'movedim', + 'narrow', + 'permute', + 'select', + 'squeeze', + 'transpose', + 't', + 'real', + 'imag', + 'view_as_real', + 'view_as_complex', + 'unflatten', + 'unfold', + 'unsqueeze', + 'view', + 'view_as', + 'unbind', + 'split', + 'split_with_sizes', + 'vsplit', + 'hsplit', + 'tensor_split', + 'chunk', + 'swapaxes', + 'slice', + '_reshape_alias', + '_unsafe_view', + '_conj', + 'alias', + } + +# manually populated from native_functions that have inplace_view: True. +# In the future we will probably be able to grab that list directly +def is_inplace_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided_', + 'detach_', + 'squeeze_', + 'swapaxes_', + 'swapdims_', + 't_', + 'transpose_', + 'unsqueeze_', + } + + +# Introspection please save us +def is_inplace(func): + name = func.overloadpacket.__name__ + if re.match('__i.+__', name): + return True + if re.match('__.+__', name): + return False + return name[-1] == '_' + + +def generate_cct_and_mode(autograd_view_consistency=True): + # This function returns a new class CompositeCompliantTensor + # The two arguments control the behaviour described below. + + # autograd_view_consistency: + # If True, alias result using `set_` if func returns a view + # (See Note [Alias Result]). + # Since Forward AD doesn't work with `set_` + # we disable it by setting alias to False. 
+ + class CompositeCompliantTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + @staticmethod + def __new__(cls, elem, mode, *args, **kwargs): + assert type(elem) is not cls, \ + "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported" + + # The storage of CompositeCompliantTensor should never be used directly + # by a Composite operation; if the Composite + # operator attempts to read from the storage without dispatching then it'll + # raise a RuntimeError due to it being a meta storage. + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=elem.requires_grad, + strides=elem.stride(), storage_offset=elem.storage_offset()) + + if elem.requires_grad: + # CompositeCompliantTensor steals the "requires_grad"-ness. + # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests... + tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype, + device=elem.device, layout=elem.layout, + requires_grad=False) + tmp.copy_(elem.detach()) + r.elem = tmp + else: + r.elem = elem + + assert r.stride() == r.elem.stride() + + # Propagate conjugate bits to the wrapper tensor + # Ref: https://github.com/albanD/subclass_zoo/issues/24 + # Ref: https://github.com/albanD/subclass_zoo/issues/21 + torch._C._set_conj(r, r.elem.is_conj()) + torch._C._set_neg(r, r.elem.is_neg()) + + r.mode = mode + return r + + def __repr__(self): + return f"CompositeCompliantTensor({self.elem})" + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + all_args = pytree.arg_tree_leaves(*args, **(kwargs or {})) + modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor)) + if not all_same_mode(modes): + raise RuntimeError("Multiple CompositeCompliantTensorModes NYI") + with modes[0]: + return func(*args, **kwargs) + + class 
CompositeCompliantTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, CompositeCompliantTensor) else e + + def wrap(e): + return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e + + if func == torch.ops.aten._local_scalar_dense.default: + raise RuntimeError( + ".item() is not allowed to be called inside of composite " + "functions in the PyTorch library because not all backends " + "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.") + + if func.overloadpacket.__name__ in ('set_', 'resize_'): + raise RuntimeError( + f"{func.__name__} is not allowed to be called inside of " + f"Composite operators.") + + if is_inplace(func): + # NB: We are making an assumption that if the function is in-place, + # then the first argument is being written to. Introspection please save us! + mutated_argument = args[0] + if not isinstance(mutated_argument, CompositeCompliantTensor) and \ + any(isinstance(a, CompositeCompliantTensor) for a in args[1:]): + raise RuntimeError( + 'Not composite compliant: performing in-place operation ' + f'{func.__name__} where the Tensor being written to is ' + 'regular Tensor but the other tensors are Tensor Subclasses. ' + 'Please try to avoid this in-place operation.') + + unwrapped_args = tree_map(unwrap, args) + unwrapped_kwargs = tree_map(unwrap, kwargs) + unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs) + rs = tree_map(wrap, unwrapped_rs) + + if is_view_fn(func) and autograd_view_consistency: + # Note [Alias Result] + # Autograd asserts that for B = A.view_fn(...), B and A's storages + # are the same. Here we try to make B alias A to avoid those asserts. + # See https://github.com/pytorch/pytorch/issues/65339 for more information + # about the issue. + with no_dispatch(): + # Idea: this is a weird way of getting a storage that aliases the input. + # This is a workaround for #65339. + # 1. 
under no_dispatch, all of the wrapper tensors look like regular + # tensors with special storage (the storage is nullptr and + # advertises CPU/CUDA device. + # 2. we run func, which ends up running the view operation + # 3. All view operations reuse the input's storage and return + # result Tensor(s) with new sizes/strides/offset that alias + # the input. + # 4. we set the storage (and sizes/strides/offset) of the wrapper + # tensor results to be that of the tensors that alias the input + result = func(*args, **kwargs) + if isinstance(result, (tuple, list)): + for a, b in zip(rs, result): + a.set_(b) + else: + rs.set_(result) + + # Some operations are allowed to in-place modify the metadata of the + # inputs. The only ones are the "inplace view functions"; when we + # run into these, we manually modify the metadata of the input. + with no_dispatch(): + if is_inplace_view_fn(func): + func(*args, **kwargs) + + # For each CompositeCompliantTensor t, we check that t and t.elem + # have consistent metadata. If they don't have consistent metadata, + # that means the operator did something fishy. 
+ check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor) + pytree.tree_map_(check, args) + pytree.tree_map_(check, kwargs) + pytree.tree_map_(check, rs) + return rs + + return CompositeCompliantTensor, CompositeCompliantTensorMode() + +def is_tensorlist(lst): + if not isinstance(lst, list) and not isinstance(lst, tuple): + return False + if len(lst) == 0: + return False + all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst) + if all_tensors: + return True + exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst) + if exists_one_tensor: + raise RuntimeError('This test assumes that PyTorch APIs cannot take ' + 'mixed lists of Tensor and other things') + return False + + +def maybe_map(fn, should_map, arg): + return fn(arg) if should_map else arg + + +def wrap(arg, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + if isinstance(arg, torch.Tensor): + return CCT(arg, cct_mode) + if is_tensorlist(arg): + return [CCT(a, cct_mode) for a in arg] + raise RuntimeError("wrap assumes that the input can be wrapped") + + +# Given a list of flat arguments, some of which may be Tensors, return all +# possible ways some of the arguments could be CompositeCompliantTensors (CCT). +# For example, given Tensors A, B, C and flat_args = [A, 1, B], +# We would return the following 4 options: +# [CCT(A), 1, CCT(B)] +# [CCT(A), 1, B] +# [A, 1, CCT(B)] +# [A, 1, B] +# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops +# don't accept that many input Tensors. 
def generate_subclass_choices(flat_args, CCT, cct_mode):
    # CCT: CompositeCompliantTensor class produced by generate_cct_and_mode.
    # Yields (flat_args_with_some_entries_wrapped, wrapped_mask) for every way
    # of wrapping the tensor-like entries of flat_args in CCT; non-tensor
    # entries are always passed through unchanged (2**num_tensor_likes choices).
    wrappable = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args]
    per_arg_options = [(False, True) if can_wrap else (False,) for can_wrap in wrappable]
    do_wrap = partial(wrap, CCT=CCT, cct_mode=cct_mode)

    for wrapped_mask in itertools.product(*per_arg_options):
        candidate = [maybe_map(do_wrap, flag, arg)
                     for flag, arg in zip(wrapped_mask, flat_args)]
        yield candidate, wrapped_mask


# For an operation f(*args, **kwargs), each Tensor argument may either be
# a regular Tensor or a Tensor Subclass. This iterator iterates through
# all of those options.
def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
    # CCT: CompositeCompliantTensor class produced by generate_cct_and_mode.
    # Yields (new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped);
    # the last two are debug metadata describing which entries got wrapped.
    flat_kwargs, spec = tree_flatten(kwargs)
    combined = list(args) + list(flat_kwargs)
    n_args = len(args)

    for choice, wrapped_mask in generate_subclass_choices(combined, CCT, cct_mode):
        new_args = choice[:n_args]
        new_kwargs = tree_unflatten(choice[n_args:], spec)
        which_args_are_wrapped = wrapped_mask[:n_args]
        which_kwargs_are_wrapped = tree_unflatten(wrapped_mask[n_args:], spec)
        yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped


def raise_composite_compliance_error(err, additional_info=''):
    # Re-raise `err` as a RuntimeError (chained via `from err`) carrying
    # guidance for OpInfo authors on how to handle the failure.
    message = (
        "Composite compliance check failed with "
        "the above error.\n"
        f"{additional_info}"
        "If you are adding an OpInfo of an "
        "existing operator, please feel free to skip this test "
        "because the problem was pre-existing and file an issue. "
        "Otherwise, if you added a new operator, please read "
        "through the Composite Compliance section in "
        "aten/src/ATen/native/README.md for how to resolve this. "
    )
    raise RuntimeError(message) from err


# This test checks ALL possible permutations of calling `op` with arguments
# that are individually either a regular Tensor or a Tensor subclass.
#
# The general strategy is to wrap some Tensor args and kwargs in
# CompositeCompliantTensor wrappers and call the operation.

# If some composite operation does any non-compliant behavior,
# CompositeCompliantTensor will raise an error.
def check_all_permutations(op, args, kwargs, assert_equal_fn):
    CCT, cct_mode = generate_cct_and_mode()
    expected = op(*args, **kwargs)

    def unwrap(e):
        return e.elem if isinstance(e, CCT) else e

    for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
        new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice

        try:
            actual = op(*new_args, **new_kwargs)
        # NOTE: [What errors are Composite Compliance trying to catch?]
        #
        # There's two things we want to catch:
        # - errors that would raise within the torch_dispatch impl
        # - data_ptr accesses
        # The first is easy to filter for (we could make the error a different
        # error class), the second is always going to be a RuntimeError due to
        # how it is implemented (if you try to access the data_ptr of the
        # wrapper Tensor, it raises you some internal RuntimeError).
        #
        # So the most general thing to catch here was RuntimeError. If you
        # are here and debugging why your test failed, it's plausible that
        # the operator itself is broken and that there are other tests failing.
        except RuntimeError as err:
            raise_composite_compliance_error(
                err,
                f"- wrapped_args: {which_args_are_wrapped}\n"
                f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
            )

        assert_equal_fn(tree_map(unwrap, actual), expected)

# Checks via the usage of torch dispatch mode certain anti-patterns that
# are not composite compliant.
#
# In particular, the anti-pattern we are trying to prevent is a user
# creating an empty tensor and then resize_-ing it.
Torch Dispatch Mode helps +# here because all factory functions will create tensors that are +# CompositeCompliantTensor. +# +# The general strategy is to wrap all Tensor args and kwargs in +# CompositeCompliantTensor wrappers. If an operator that is +# Composite does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_with_mode(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + + def wrap(e): + return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e + + expected = op(*args, **kwargs) + + args = tree_map(wrap, args) + kwargs = tree_map(wrap, kwargs) + try: + with cct_mode: + actual = op(*args, **kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error(err) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +def gather_leaf_tensors(args, kwargs): + leaf_tensors = [] + args, args_spec = tree_flatten(args) + kwargs, kwargs_spec = tree_flatten(kwargs) + args = args + kwargs + for arg in args: + if not isinstance(arg, torch.Tensor): + continue + if arg.requires_grad: + leaf_tensors.append(arg) + return leaf_tensors + + +def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None): + if gradcheck_wrapper is None: + results = op(*args, **kwargs) + else: + results = gradcheck_wrapper(op, *args, **kwargs) + + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] + leaf_tensors = gather_leaf_tensors(args, kwargs) + assert len(leaf_tensors) > 0 + return 
torch.autograd.grad(flat_diff_results, leaf_tensors, + grads, allow_unused=True, retain_graph=True) + + +# Checks if the backward formula is composite compliant by testing +# all possible permutations of {inputs, grad_outputs} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_backward_formula to things that aren't OpInfos +# while debugging. +def check_backward_formula(op: Callable, args, kwargs, + output_process_fn_grad=None, + gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode() + + expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper) + + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + leaf_tensors = gather_leaf_tensors(new_args, new_kwargs) + assert len(leaf_tensors) > 0 + + try: + if gradcheck_wrapper is None: + results = op(*new_args, **new_kwargs) + else: + results = gradcheck_wrapper(op, *new_args, **new_kwargs) + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + # see NOTE: [What errors are Composite Compliance trying to catch?] 
+ except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + # NB: ones, not ones_like, so we get a regular Tensor here + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) + for r in flat_diff_results] + for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode): + try: + actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads, + allow_unused=True, retain_graph=True) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_grads: {which_grad_is_batched}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True) + +# Checks if the forward AD formula is composite compliant by testing +# all possible permutations of {primals, tangents} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_forward_ad_formula to things that aren't OpInfos +# while debugging. +def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False) + + def maybe_tangent(t): + assert type(t) is not CCT + # Generate `tangent` tensor + # if given object is a Tensor and requires grad is set. 
+ if isinstance(t, torch.Tensor) and t.requires_grad: + return torch.randn_like(t) + elif is_tensorlist(t): + return [torch.randn_like(e) if e.requires_grad else None for e in t] + return None + + tangent_args = tuple(maybe_tangent(arg) for arg in args) + flat_kwargs, spec = tree_flatten(kwargs) + flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs) + tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec) + + with fwAD.dual_level(): + def maybe_make_dual(dual): + # Returns dual tensor if primal is a tensor/tensor subclass + # with requires_grad set. + primal, tangent = dual + if isinstance(primal, torch.Tensor) and primal.requires_grad: + return fwAD.make_dual(primal.detach(), tangent) + elif is_tensorlist(primal): + return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri + for pri, tang in zip(primal, tangent)) + return primal + + def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs): + op_args = tuple(map(maybe_make_dual, zip(args, tangent_args))) + op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()} + + if gradcheck_wrapper is None: + return op(*op_args, **op_kwargs) + return gradcheck_wrapper(op, *op_args, **op_kwargs) + + expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs) + expected = tree_map(fwAD.unpack_dual, expected) + expected_primals = tree_map(lambda x: x.primal, expected) + expected_tangents = tree_map(lambda x: x.tangent, expected) + + # Permutations of arg and kwargs in CCT. + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + # Permutations tangent arg and tangent kwargs in CCT. 
+ for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode): + new_tang_args, new_tang_kwargs, \ + which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice + + op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args))) + op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()} + + try: + if gradcheck_wrapper is None: + actual = op(*op_args, **op_kwargs) + else: + actual = gradcheck_wrapper(op, *op_args, **op_kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n" + f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + actual = tree_map(fwAD.unpack_dual, actual) + actual_primals = tree_map(lambda x: unwrap(x.primal), actual) + actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual) + assert_equal_fn(actual_primals, expected_primals, equal_nan=True) + assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py new file mode 100644 index 0000000000000000000000000000000000000000..71a2a8f10651d09855828a98dbf53179c9c3eb77 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py @@ -0,0 +1,489 @@ +# mypy: allow-untyped-defs +import torch +import functools +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np +from torch.testing._internal.autograd_function_db import ( + 
sample_inputs_numpy_cube, + sample_inputs_numpy_mul, + sample_inputs_numpy_mul_scalar, + sample_inputs_numpy_sort, + sample_inputs_numpy_take, +) +from torch import Tensor +from torch.types import Number +from typing import * # noqa: F403 + +# Note: [custom op db] +# +# This is a collection of custom operator test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if subsystems +# support them correctly. + +def to_numpy(tensor): + return tensor.cpu().numpy() + +@torch.library.custom_op("_torch_testing::numpy_cube", mutates_args=()) +def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]: + x_np = to_numpy(x) + dx = torch.tensor(3 * x_np ** 2, device=x.device) + return torch.tensor(x_np ** 3, device=x.device), dx + +@numpy_cube.register_fake +def _(x): + return x.clone(), x.clone() + +def numpy_cube_setup_context(ctx, inputs, output): + x, = inputs + cube, dx = output + ctx.save_for_backward(x, dx) + +def numpy_cube_backward(ctx, grad_out, grad_dx): + x, dx = ctx.saved_tensors + grad_x = numpy_mul(grad_out, dx) + 6 * numpy_mul(grad_dx, x) + return grad_x + +numpy_cube.register_autograd(numpy_cube_backward, setup_context=numpy_cube_setup_context) + +@torch.library.custom_op("_torch_testing::numpy_mul", mutates_args=()) +def numpy_mul(x: Tensor, y: Tensor) -> Tensor: + return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) + +@numpy_mul.register_fake +def _(x, y): + assert x.device == y.device + return (x * y).contiguous() + +def numpy_mul_setup_context(ctx, inputs, output): + ctx.save_for_backward(*inputs) + +def numpy_mul_backward(ctx, grad_out): + x, y = ctx.saved_tensors + grad_x = grad_out * y if ctx.needs_input_grad[0] else None + grad_y = grad_out * x if ctx.needs_input_grad[1] else None + return grad_x, grad_y + +numpy_mul.register_autograd(numpy_mul_backward, setup_context=numpy_mul_setup_context) + +@torch.library.custom_op("_torch_testing::numpy_mul_scalar", mutates_args=()) +def numpy_mul_scalar(x: Tensor, *, 
scalar: float) -> Tensor: + return torch.tensor(to_numpy(x) * scalar, device=x.device) + +@numpy_mul_scalar.register_fake +def _(x, *, scalar): + return (x * scalar).contiguous() + +def numpy_mul_scalar_setup_context(ctx, inputs, keyword_only_inputs, output): + ctx.scalar = keyword_only_inputs["scalar"] + +def numpy_mul_scalar_backward(ctx, grad_out): + grad_x = grad_out * ctx.scalar + return grad_x + +numpy_mul_scalar.register_autograd(numpy_mul_scalar_backward, setup_context=numpy_mul_scalar_setup_context) + +@torch.library.custom_op("_torch_testing::numpy_sort", mutates_args=()) +def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]: + device = x.device + x = to_numpy(x) + ind = np.argsort(x, axis=dim) + ind_inv = np.argsort(ind, axis=dim) + result = np.take_along_axis(x, ind, axis=dim) + return ( + torch.tensor(result, device=device), + torch.tensor(ind, device=device), + torch.tensor(ind_inv, device=device), + ) + +@numpy_sort.register_fake +def _(x, dim): + return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long) + +def numpy_sort_setup_context(ctx, inputs, output): + out, ind, ind_inv = output + ctx.dim = inputs[1] + ctx.save_for_backward(ind, ind_inv) + ctx.mark_non_differentiable(ind, ind_inv) + +def numpy_sort_backward(ctx, grad_out, grad_ind, grad_ind_inv): + ind, ind_inv = ctx.saved_tensors + return numpy_take(grad_out, ind_inv, ind, ctx.dim), None + +numpy_sort.register_autograd(numpy_sort_backward, setup_context=numpy_sort_setup_context) + + +@torch.library.custom_op("_torch_testing::numpy_take", mutates_args=()) +def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor: + device = x.device + x = to_numpy(x) + ind = to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + +@numpy_take.register_fake +def _(x, ind, ind_inv, dim): + assert x.device == ind.device + assert x.device == ind_inv.device + assert ind.dtype == torch.long + assert 
ind_inv.dtype == torch.long + return torch.empty_like(x) + +def numpy_take_setup_context(ctx, inputs, output): + x, ind, ind_inv, dim = inputs + ctx.dim = dim + ctx.save_for_backward(ind, ind_inv) + +def numpy_take_backward(ctx, grad_out): + ind, ind_inv = ctx.saved_tensors + grad_x = numpy_take(grad_out, ind_inv, ind, ctx.dim) + return grad_x, None, None, None + +numpy_take.register_autograd(numpy_take_backward, setup_context=numpy_take_setup_context) + +@torch.library.custom_op("_torch_testing::numpy_nonzero", mutates_args=()) +def numpy_nonzero(x: Tensor) -> Tensor: + x_np = to_numpy(x) + res = np.stack(np.nonzero(x_np), axis=1) + if res.shape[0] <= 1: + raise RuntimeError("not supported") + return torch.tensor(res, device=x.device) + +@numpy_nonzero.register_fake +def _(x): + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + shape = [i0, x.dim()] + result = x.new_empty(shape, dtype=torch.long) + return result + +def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = 10 + result = make_arg(shape, low=0.9, high=2) + mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long) + with torch.no_grad(): + result *= mask + + yield SampleInput(result, args=()) + +@torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=()) +def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor: + return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device) + +@numpy_view_copy.register_fake +def _(x, shape) -> Tensor: + return x.clone().view(shape).clone() + +def numpy_view_copy_setup_context(ctx, inputs, output) -> None: + ctx.x_shape = inputs[0].shape + +def numpy_view_copy_backward(ctx, grad_out): + return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None + +numpy_view_copy.register_autograd(numpy_view_copy_backward, 
setup_context=numpy_view_copy_setup_context) + +def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + result = make_arg(2, 3, 4, low=0.9, high=2) + yield SampleInput(result, args=([2, 12],)) + +@torch.library.custom_op('_torch_testing::numpy_cat', mutates_args=()) +def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor: + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + np_xs = [to_numpy(x) for x in xs] + np_out = np.concatenate(np_xs, axis=dim) + return torch.tensor(np_out, device=xs[0].device) + +@numpy_cat.register_fake +def _(xs, dim): + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + return torch.cat(xs, dim=dim) + +def numpy_cat_setup_context(ctx, inputs, output): + xs, dim = inputs + ctx.dim_sizes = [x.shape[dim] for x in xs] + ctx.dim = dim + +def numpy_cat_backward(ctx, grad_out): + dim_sizes = ctx.dim_sizes + dim = ctx.dim + + splits = list(np.cumsum(dim_sizes)[:-1]) + grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim) + return grad_xs, None + +numpy_cat.register_autograd(numpy_cat_backward, setup_context=numpy_cat_setup_context) + +def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + r0 = make_arg(2, 3, 4, low=0.9, high=2) + r1 = make_arg(4, 3, 4, low=0.9, high=2) + r2 = make_arg(5, 3, 4, low=0.9, high=2) + yield SampleInput([r0, r1, r2], args=(0,)) + +@torch.library.custom_op('_torch_testing::numpy_split_copy', mutates_args=()) +def numpy_split_copy(x: Tensor, splits: Sequence[int], dim: int) -> List[Tensor]: + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) 
for arr in arrs] + +@numpy_split_copy.register_fake +def _(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] + +def numpy_split_copy_setup_context(ctx, inputs, output): + _, _, dim = inputs + ctx.dim = dim + +def numpy_split_copy_backward(ctx, grad_out): + result = torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim) + return result, None, None + +numpy_split_copy.register_autograd(numpy_split_copy_backward, setup_context=numpy_split_copy_setup_context) + +def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg(2, 9, low=0.9, high=2) + yield SampleInput(x, args=([1, 3, 6], 1)) + +@torch.library.custom_op('_torch_testing::numpy_split_copy_with_int', mutates_args=()) +def numpy_split_copy_with_int(x: Tensor, splits: Sequence[int], dim: int) -> Tuple[List[Tensor], int]: + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits) + +@numpy_split_copy_with_int.register_fake +def _(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits) + +def numpy_split_copy_with_int_setup_context(ctx, inputs, output): + _, _, dim = inputs + ctx.dim = dim + +def numpy_split_copy_with_int_backward(ctx, grad_out, _): + return torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim), None, None + +numpy_split_copy_with_int.register_autograd( + numpy_split_copy_with_int_backward, + setup_context=numpy_split_copy_with_int_setup_context) + +@torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=()) +def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor: + # Adapted from Ross Girshick's fast-rcnn implementation at + # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py + assert boxes.device == scores.device + device = 
boxes.device + + boxes = to_numpy(boxes) + scores = to_numpy(scores) + + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= iou_threshold)[0] + order = order[inds + 1] + + result = torch.tensor(np.stack(keep), device=device) + # Needed for data-dependent condition :( + assert result.size(0) >= 2 + return result + +@numpy_nms.register_fake +def _(boxes, scores, iou_threshold): + assert boxes.device == scores.device + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + result = boxes.new_empty([i0], dtype=torch.int64) + return result + +def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype) + N = 64 + xs = make_arg([N], low=0, high=28) + dx = make_arg([N], low=0, high=4) + ys = make_arg([N], low=0, high=28) + dy = make_arg([N], low=0, high=4) + boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad) + scores = make_arg([N], low=0, high=1, requires_grad=requires_grad) + iou_threshold = make_arg([], low=0, high=1).item() + + yield SampleInput(boxes, args=(scores, iou_threshold)) + +custom_op_db = [ + OpInfo( + 'NumpyCubeCustomOp', + op=numpy_cube._opoverload, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + 
supports_out=False, + ), + OpInfo( + 'NumpyMulCustomOp', + op=numpy_mul._opoverload, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulScalarCustomOp', + op=numpy_mul_scalar._opoverload, + sample_inputs_func=sample_inputs_numpy_mul_scalar, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortCustomOp', + op=numpy_sort._opoverload, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyTakeCustomOp', + op=numpy_take._opoverload, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyNonzeroCustomOp', + op=numpy_nonzero._opoverload, + sample_inputs_func=sample_inputs_numpy_nonzero, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyNMSCustomOp', + op=torch.ops._torch_testing.numpy_nms, + sample_inputs_func=sample_inputs_numpy_nms, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyViewCopyCustomOp', + op=torch.ops._torch_testing.numpy_view_copy, + sample_inputs_func=sample_inputs_numpy_view_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + supports_out=False, + ), + OpInfo( + 'NumpyCatCustomOp', + op=torch.ops._torch_testing.numpy_cat, + sample_inputs_func=sample_inputs_numpy_cat, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyCustomOp', + op=torch.ops._torch_testing.numpy_split_copy, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + 
check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyWithIntCustomOp', + op=torch.ops._torch_testing.numpy_split_copy_with_int, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0], + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), +] + + +# ============================================================== +# some mechanical test cases +# ============================================================== + +lib = torch.library.Library("_torch_testing", "FRAGMENT") # noqa: TOR901 + +lib.define("source0(Tensor x) -> Tensor") + +@torch.library.register_fake("_torch_testing::source0", lib=lib) +def _(x): + return x.clone() + +lib.define("source1(Tensor x) -> Tensor") + +def source1_fake(x): + return x.clone() + +torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib) + +lib.define("source2(Tensor x) -> Tensor") + +@torch.library.register_fake("_torch_testing::source2", lib=lib) +def _(x): + return x.clone() + +lib.define("source3(Tensor x) -> Tensor") + +def source3_fake(x): + return x.clone() + +torch.library.register_fake("_torch_testing::source3", source3_fake, lib=lib) + + +@torch.library.custom_op("_torch_testing::source4", mutates_args=()) +def source4(x: Tensor) -> Tensor: + return x.clone() + +@source4.register_fake +def _(x): + return x.clone() + +@torch.library.custom_op("_torch_testing::source5", mutates_args=()) +def source5(x: Tensor) -> Tensor: + return x.clone() + +def source5_fake(x): + return x.clone() + +source5.register_fake(source5_fake) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..21a1f2011e6f322154ca7c591907ed1518c7cd20 --- /dev/null +++ 
b/valley/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py @@ -0,0 +1,200 @@ +# mypy: ignore-errors + +import re +import sys +import time +from functools import partial, wraps +from typing import Tuple + +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch.distributed.rpc import _rref_context_get_debug_info +from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN + + +if not dist.is_available(): + print("c10d not available, skipping tests", file=sys.stderr) + sys.exit(0) + + +INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}" + +def dist_init( + old_test_method=None, + setup_rpc: bool = True, + clean_shutdown: bool = True, + faulty_messages=None, + messages_to_delay=None, +): + """ + We use this decorator for setting up and tearing down state since + MultiProcessTestCase runs each `test*` method in a separate process and + each process just runs the `test*` method without actually calling + 'setUp' and 'tearDown' methods of unittest. + + Note: pass the string representation of MessageTypes that should be used + with the faulty agent's send function. By default, all retriable messages + ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is + set from faulty_rpc_agent_test_fixture.py). + """ + # If we use dist_init without arguments (ex: @dist_init), old_test_method is + # appropriately set and we return the wrapper appropriately. On the other + # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)), + # old_test_method is None and we return a functools.partial which is the real + # decorator that is used and as a result we recursively call dist_init with + # old_test_method and the rest of the arguments appropriately set. 
+ if old_test_method is None: + return partial( + dist_init, + setup_rpc=setup_rpc, + clean_shutdown=clean_shutdown, + faulty_messages=faulty_messages, + messages_to_delay=messages_to_delay, + ) + + @wraps(old_test_method) + def new_test_method(self, *arg, **kwargs): + # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted + # in tests. + import torch.distributed.rpc.api as api + + api._ignore_rref_leak = False + self.worker_id = self.rank + self.setup_fault_injection(faulty_messages, messages_to_delay) + + rpc_backend_options = self.rpc_backend_options + if setup_rpc: + if TEST_WITH_TSAN: + # TSAN runs much slower. + rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5 + rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60 + + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + return_value = old_test_method(self, *arg, **kwargs) + + if setup_rpc: + rpc.shutdown(graceful=clean_shutdown) + + return return_value + + return new_test_method + + +def noop() -> None: + pass + + +def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str: + """ + Loops until an RPC to the given rank fails. This is used to + indicate that the node has failed in unit tests. + Args: + rank (int): Rank of the node expected to fail + expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure + occurs, not just any. + """ + while True: + try: + rpc.rpc_sync(f"worker{rank}", noop, args=()) + time.sleep(0.1) + except Exception as e: + if re.search(pattern=expected_error_regex, string=str(e)): + return str(e) + + +def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None: + """ + The RRef protocol holds forkIds of rrefs in a map until those forks are + confirmed by the owner. 
The message confirming the fork may arrive after + our tests check whether this map is empty, which leads to failures and + flaky tests. to_here also does not guarantee that we have finished + processind the owner's confirmation message for the RRef. This function + loops until the map is empty, which means the messages have been received + as processed. Call this function before asserting the map returned by + _get_debug_info is empty. + """ + start = time.time() + while True: + debug_info = _rref_context_get_debug_info() + num_pending_futures = int(debug_info["num_pending_futures"]) + num_pending_users = int(debug_info["num_pending_users"]) + if num_pending_futures == 0 and num_pending_users == 0: + break + time.sleep(0.1) + if time.time() - start > timeout: + raise ValueError( + f"Timed out waiting to flush pending futures and users, " + f"had {num_pending_futures} pending futures and {num_pending_users} pending users" + ) + + +def get_num_owners_and_forks() -> Tuple[str, str]: + """ + Retrieves number of OwnerRRefs and forks on this node from + _rref_context_get_debug_info. + """ + rref_dbg_info = _rref_context_get_debug_info() + num_owners = rref_dbg_info["num_owner_rrefs"] + num_forks = rref_dbg_info["num_forks"] + return num_owners, num_forks + + +def wait_until_owners_and_forks_on_rank( + num_owners: int, num_forks: int, rank: int, timeout: int = 20 +) -> None: + """ + Waits until timeout for num_forks and num_owners to exist on the rank. Used + to ensure proper deletion of RRefs in tests. 
+ """ + start = time.time() + while True: + num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync( + worker_name(rank), get_num_owners_and_forks, args=(), timeout=5 + ) + num_owners_on_rank = int(num_owners_on_rank) + num_forks_on_rank = int(num_forks_on_rank) + if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks: + return + time.sleep(1) + if time.time() - start > timeout: + raise ValueError( + f"Timed out waiting {timeout} sec for {num_owners} owners and {num_forks} forks on rank," + f" had {num_owners_on_rank} owners and {num_forks_on_rank} forks" + ) + + +def initialize_pg(init_method, rank: int, world_size: int) -> None: + # This is for tests using `dist.barrier`. + if not dist.is_initialized(): + dist.init_process_group( + backend="gloo", + init_method=init_method, + rank=rank, + world_size=world_size, + ) + + +def worker_name(rank: int) -> str: + return f"worker{rank}" + + +def get_function_event(function_events, partial_event_name): + """ + Returns the first event that matches partial_event_name in the provided + function_events. These function_events should be the output of + torch.autograd.profiler.function_events(). + + Args: + function_events: function_events returned by the profiler. + event_name (str): partial key that the event was profiled with. 
+ """ + event = [event for event in function_events if partial_event_name in event.name][0] # noqa: RUF015 + return event diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py new file mode 100644 index 0000000000000000000000000000000000000000..3b5c291bc41f2cee1cbb0f311c4c3ee65ec78a7a --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py @@ -0,0 +1,114 @@ +# mypy: allow-untyped-defs +import logging +import os +import sys + +# NOTE: [dynamo_test_failures.py] +# +# We generate xFailIfTorchDynamo* for all tests in `dynamo_expected_failures` +# We generate skipIfTorchDynamo* for all tests in `dynamo_skips` +# +# For an easier-than-manual way of generating and updating these lists, +# see scripts/compile_tests/update_failures.py +# +# If you're adding a new test, and it's failing PYTORCH_TEST_WITH_DYNAMO=1, +# either add the appropriate decorators to your test or add skips for them +# via test/dynamo_skips and test/dynamo_expected_failures. +# +# *These are not exactly unittest.expectedFailure and unittest.skip. We'll +# always execute the test and then suppress the signal, if necessary. +# If your tests crashes, or is slow, please use @skipIfTorchDynamo instead. +# +# The expected failure and skip files are located in test/dynamo_skips and +# test/dynamo_expected_failures. They're individual files rather than a list so +# git will merge changes easier. + + +def find_test_dir(): + # Find the path to the dynamo expected failure and skip files. 
+ from os.path import abspath, basename, dirname, exists, join, normpath + + if sys.platform == "win32": + return None + + # Check relative to this file (local build): + test_dir = normpath(join(dirname(abspath(__file__)), "../../../test")) + if exists(join(test_dir, "dynamo_expected_failures")): + return test_dir + + # Check relative to __main__ (installed builds relative to test file): + main = sys.modules["__main__"] + file = getattr(main, "__file__", None) + if file is None: + # Generated files do not have a module.__file__ + return None + test_dir = dirname(abspath(file)) + while dirname(test_dir) != test_dir: + if basename(test_dir) == "test" and exists( + join(test_dir, "dynamo_expected_failures") + ): + return test_dir + test_dir = dirname(test_dir) + + # Not found + return None + + +test_dir = find_test_dir() +if not test_dir: + logger = logging.getLogger(__name__) + logger.warning( + "test/dynamo_expected_failures directory not found - known dynamo errors won't be skipped." + ) + +# Tests that run without strict mode in PYTORCH_TEST_WITH_INDUCTOR=1. +# Please don't add anything to this list. +FIXME_inductor_non_strict = { + "test_modules", + "test_ops", + "test_ops_gradients", + "test_torch", +} + +# We generate unittest.expectedFailure for all of the following tests +# when run under PYTORCH_TEST_WITH_DYNAMO=1. 
+# see NOTE [dynamo_test_failures.py] for more details +# +# This lists exists so we can more easily add large numbers of failing tests, +if test_dir is None: + dynamo_expected_failures = set() + dynamo_skips = set() +else: + failures_directory = os.path.join(test_dir, "dynamo_expected_failures") + skips_directory = os.path.join(test_dir, "dynamo_skips") + + dynamo_expected_failures = set(os.listdir(failures_directory)) + dynamo_skips = set(os.listdir(skips_directory)) + +# TODO: due to case sensitivity problems, for now list these files by hand +extra_dynamo_skips = { + "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_t_cpu_float32", +} +dynamo_skips = dynamo_skips.union(extra_dynamo_skips) + + +# verify some invariants +for test in dynamo_expected_failures.union(dynamo_skips): + if len(test.split(".")) != 2: + raise AssertionError(f'Invalid test name: "{test}"') + +intersection = dynamo_expected_failures.intersection(dynamo_skips) +if len(intersection) > 0: + raise AssertionError( + "there should be no overlap between dynamo_expected_failures " + "and dynamo_skips, got " + str(intersection) + ) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py new file mode 100644 index 
0000000000000000000000000000000000000000..1602c1ef656258fcee5cb7df6cd1a9d2fb6bc13d --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py @@ -0,0 +1,258 @@ +# mypy: ignore-errors + +import torch +import functools +from torch.testing import make_tensor +import unittest +from functorch.experimental.control_flow import map +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and, custom_types +from torch.testing._internal.opinfo.core import DecorateInfo +from torch.nn.attention._flex_attention import _flex_attention + +def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)], + args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2))) + +def inner_f(x, y0, y1): + return [x[0].cos().add_(1.) * y0, (x[1] + y1.sin()).cos_().view(x[1].size())] + +def simple_map(xs, y0, y1): + def f(x, y0, y1): + return inner_f(x, y0, y1) + return map(f, xs, y0, y1) + +def nested_map(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + +def triple_nested_map(xs, y0, y1): + def f0(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + return map(f0, xs, y0, y1) + + +# Please consult with torch.export team before +# adding new entry to this list. 
+hop_that_doesnt_have_opinfo_test_allowlist = [ + "custom_function_call", + "autograd_function_apply", + "run_and_save_rng_state", + "run_with_rng_state", + "out_dtype", + "trace_wrapped", + "map", # T183144629 + "map_impl", + "with_effects", + "strict_mode", + "_export_tracepoint", + "call_torchbind", +] + +torch.library.define( + "testlib::mutating_custom_op", + "(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)", + tags=torch.Tag.pt2_compliant_tag, +) + + +@torch.library.impl("testlib::mutating_custom_op", "cpu") +def foo_impl_cpu(x, z): + x.add_(5) + z.add_(5) + return x, z, x + z + + +@torch.library.impl("testlib::mutating_custom_op", "cuda") +def foo_impl_cuda(x, z): + x.add_(5) + z.add_(5) + return x, z, x + z + + +@torch.library.register_fake("testlib::mutating_custom_op") +def foo_impl_abstract(x, z): + return x, z, x + z + + +def sample_inputs_cond(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=False + ) + yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2)) + + +def simple_cond(x): + return torch.cond(x.shape[0] > 2, lambda x: x.cos(), lambda x: x.sin(), [x]) + + +def sample_inputs_auto_functionalize(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=False + ) + yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)) + + +def simple_auto_functionalize(x, z): + return torch.ops.testlib.mutating_custom_op(x, z) + + +def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + def score_mod(score, b, h, m, n): + return score + h + + yield SampleInput( + make_arg(2, 2, 128, 8, low=0.1, high=2), + make_arg(2, 2, 128, 8, low=0.1, high=2), + make_arg(2, 2, 128, 8, low=0.1, high=2), + score_mod, + ) + +def 
sample_inputs_while_loop(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=False + ) + yield SampleInput( + torch.tensor(3), + make_arg(2, 3, 4, low=0.1, high=2), + ) + +def simple_while_loop(iter_t, x): + def cond_fn(iter_t, x): + return iter_t > 0 + + def body_fn(iter_t, x): + return iter_t - 1, x.cos() + + return torch._higher_order_ops.while_loop(cond_fn, body_fn, (iter_t, x)) + + +hop_db = [ + OpInfo( + name="map", + variant_test_name="simple", + op=simple_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="map", + variant_test_name="nested", + op=nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="map", + variant_test_name="triple_nested", + op=triple_nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="cond", + variant_test_name="simple", + op=simple_cond, + sample_inputs_func=sample_inputs_cond, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=False, + ), + OpInfo( + name="while_loop", + variant_test_name="simple", + op=simple_while_loop, + sample_inputs_func=sample_inputs_while_loop, + 
dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=False, + ), + OpInfo( + name="auto_functionalize", + variant_test_name="simple", + op=simple_auto_functionalize, + sample_inputs_func=sample_inputs_auto_functionalize, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=False, + ), + OpInfo( + name="flex_attention", + variant_test_name="simple", + op=_flex_attention, + sample_inputs_func=sample_inputs_flex_attention, + dtypes=custom_types(torch.float16, torch.float32), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"), + ) + ), + OpInfo( + name="flex_attention_backward", + variant_test_name="simple", + op=_flex_attention, + sample_inputs_func=sample_inputs_flex_attention, + dtypes=custom_types(torch.float16, torch.float32), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"), + ) 
+ ) +] diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..98aa82e1c93d2fa81cf88ce058abb00a619ae7e9 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py @@ -0,0 +1,371 @@ +# mypy: ignore-errors + +from collections import defaultdict +from collections.abc import Iterable +import numpy as np +import torch + +import hypothesis +from functools import reduce +from hypothesis import assume +from hypothesis import settings +from hypothesis import strategies as st +from hypothesis.extra import numpy as stnp +from hypothesis.strategies import SearchStrategy + +from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams + +# Setup for the hypothesis tests. +# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last +# element is enforced zero_point. If None, any zero_point point within the +# range of the data type is OK. + +# Tuple with all quantized data types. +_ALL_QINT_TYPES = ( + torch.quint8, + torch.qint8, + torch.qint32, +) + +# Enforced zero point for every quantized data type. +# If None, any zero_point point within the range of the data type is OK. 
+_ENFORCED_ZERO_POINT = defaultdict(lambda: None, { + torch.quint8: None, + torch.qint8: None, + torch.qint32: 0 +}) + +def _get_valid_min_max(qparams): + scale, zero_point, quantized_type = qparams + adjustment = 1 + torch.finfo(torch.float).eps + _long_type_info = torch.iinfo(torch.long) + long_min, long_max = _long_type_info.min / adjustment, _long_type_info.max / adjustment + # make sure intermediate results are within the range of long + min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point)) + max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point)) + return np.float32(min_value), np.float32(max_value) + +# This wrapper wraps around `st.floats` and checks the version of `hypothesis`, if +# it is too old, removes the `width` parameter (which was introduced) +# in 3.67.0 +def _floats_wrapper(*args, **kwargs): + if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0): + # As long as nan, inf, min, max are not specified, reimplement the width + # parameter for older versions of hypothesis. 
+ no_nan_and_inf = ( + (('allow_nan' in kwargs and not kwargs['allow_nan']) or + 'allow_nan' not in kwargs) and + (('allow_infinity' in kwargs and not kwargs['allow_infinity']) or + 'allow_infinity' not in kwargs)) + min_and_max_not_specified = ( + len(args) == 0 and + 'min_value' not in kwargs and + 'max_value' not in kwargs + ) + if no_nan_and_inf and min_and_max_not_specified: + if kwargs['width'] == 16: + kwargs['min_value'] = torch.finfo(torch.float16).min + kwargs['max_value'] = torch.finfo(torch.float16).max + elif kwargs['width'] == 32: + kwargs['min_value'] = torch.finfo(torch.float32).min + kwargs['max_value'] = torch.finfo(torch.float32).max + elif kwargs['width'] == 64: + kwargs['min_value'] = torch.finfo(torch.float64).min + kwargs['max_value'] = torch.finfo(torch.float64).max + kwargs.pop('width') + return st.floats(*args, **kwargs) + +def floats(*args, **kwargs): + if 'width' not in kwargs: + kwargs['width'] = 32 + return _floats_wrapper(*args, **kwargs) + +"""Hypothesis filter to avoid overflows with quantized tensors. + +Args: + tensor: Tensor of floats to filter + qparams: Quantization parameters as returned by the `qparams`. + +Returns: + True + +Raises: + hypothesis.UnsatisfiedAssumption + +Note: This filter is slow. Use it only when filtering of the test cases is + absolutely necessary! +""" +def assume_not_overflowing(tensor, qparams): + min_value, max_value = _get_valid_min_max(qparams) + assume(tensor.min() >= min_value) + assume(tensor.max() <= max_value) + return True + +"""Strategy for generating the quantization parameters. + +Args: + dtypes: quantized data types to sample from. + scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3. + zero_point_min / zero_point_max: Min and max for the zero point. If None, + set to the minimum and maximum of the quantized data type. + Note: The min and max are only valid if the zero_point is not enforced + by the data type itself. + +Generates: + scale: Sampled scale. 
+ zero_point: Sampled zero point. + quantized_type: Sampled quantized type. +""" +@st.composite +def qparams(draw, dtypes=None, scale_min=None, scale_max=None, + zero_point_min=None, zero_point_max=None): + if dtypes is None: + dtypes = _ALL_QINT_TYPES + if not isinstance(dtypes, (list, tuple)): + dtypes = (dtypes,) + quantized_type = draw(st.sampled_from(dtypes)) + + _type_info = torch.iinfo(quantized_type) + qmin, qmax = _type_info.min, _type_info.max + + # TODO: Maybe embed the enforced zero_point in the `torch.iinfo`. + _zp_enforced = _ENFORCED_ZERO_POINT[quantized_type] + if _zp_enforced is not None: + zero_point = _zp_enforced + else: + _zp_min = qmin if zero_point_min is None else zero_point_min + _zp_max = qmax if zero_point_max is None else zero_point_max + zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max)) + + if scale_min is None: + scale_min = torch.finfo(torch.float).eps + if scale_max is None: + scale_max = torch.finfo(torch.float).max + scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32)) + + return scale, zero_point, quantized_type + +"""Strategy to create different shapes. +Args: + min_dims / max_dims: minimum and maximum rank. + min_side / max_side: minimum and maximum dimensions per rank. + +Generates: + Possible shapes for a tensor, constrained to the rank and dimensionality. + +Example: + # Generates 3D and 4D tensors. + @given(Q = qtensor(shapes=array_shapes(min_dims=3, max_dims=4)) + some_test(self, Q):... 
+""" +@st.composite +def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None): + """Return a strategy for array shapes (tuples of int >= 1).""" + assert min_dims < 32 + if max_dims is None: + max_dims = min(min_dims + 2, 32) + assert max_dims < 32 + if max_side is None: + max_side = min_side + 5 + candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims) + if max_numel is not None: + candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel) + return draw(candidate.map(tuple)) + + +"""Strategy for generating test cases for tensors. +The resulting tensor is in float32 format. + +Args: + shapes: Shapes under test for the tensor. Could be either a hypothesis + strategy, or an iterable of different shapes to sample from. + elements: Elements to generate from for the returned data type. + If None, the strategy resolves to float within range [-1e6, 1e6]. + qparams: Instance of the qparams strategy. This is used to filter the tensor + such that the overflow would not happen. + +Generates: + X: Tensor of type float32. Note that NaN and +/-inf is not included. + qparams: (If `qparams` arg is set) Quantization parameters for X. + The returned parameters are `(scale, zero_point, quantization_type)`. + (If `qparams` arg is None), returns None. 
def _draw_shape(draw, shapes):
    # `shapes` is either a hypothesis strategy producing shapes, or an
    # iterable of concrete shapes to sample from.
    if isinstance(shapes, SearchStrategy):
        return draw(shapes)
    return draw(st.sampled_from(shapes))


@st.composite
def tensor(draw, shapes=None, elements=None, qparams=None, dtype=np.float32):
    """Strategy for generating test-case tensors (as numpy arrays).

    Args:
        shapes: Shapes under test. Either a hypothesis strategy or an
            iterable of shapes to sample from.
        elements: Element strategy. If None, floats within [-1e6, 1e6].
        qparams: Instance of the `qparams` strategy; when given, drawn values
            stay inside the valid quantized range for those parameters.
        dtype: numpy dtype of the produced array.

    Generates:
        (X, None) when `qparams` is None, otherwise
        (X, (scale, zero_point, quantized_type)).
        NaN and +/-inf are never included in X.
    """
    _shape = _draw_shape(draw, shapes)
    if qparams is None:
        if elements is None:
            elements = floats(-1e6, 1e6, allow_nan=False, width=32)
        X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
        assume(not (np.isnan(X).any() or np.isinf(X).any()))
        return X, None
    qparams = draw(qparams)
    if elements is None:
        min_value, max_value = _get_valid_min_max(qparams)
        elements = floats(min_value, max_value, allow_infinity=False,
                          allow_nan=False, width=32)
    X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
    # Recompute scale/zero_point from the drawn data so the returned qparams
    # are consistent with X's actual statistics.
    scale, zp = _calculate_dynamic_qparams(X, qparams[2])
    enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
    if enforced_zp is not None:
        zp = enforced_zp
    return X, (scale, zp, qparams[2])


@st.composite
def per_channel_tensor(draw, shapes=None, elements=None, qparams=None):
    """Like `tensor` (float32 only), but with per-channel qparams.

    Generates:
        (X, None) or (X, (scale, zero_point, axis, quantized_type)).
    """
    _shape = _draw_shape(draw, shapes)
    if qparams is None:
        if elements is None:
            elements = floats(-1e6, 1e6, allow_nan=False, width=32)
        X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
        assume(not (np.isnan(X).any() or np.isinf(X).any()))
        return X, None
    qparams = draw(qparams)
    if elements is None:
        min_value, max_value = _get_valid_min_max(qparams)
        elements = floats(min_value, max_value, allow_infinity=False,
                          allow_nan=False, width=32)
    X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
    # Recompute the scale and zero_points according to the X statistics.
    scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2])
    enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
    if enforced_zp is not None:
        zp = enforced_zp
    # Swap a randomly chosen axis with axis 0 to model quantization along it.
    # NOTE(review): this uses numpy's global RNG rather than `draw`, so the
    # axis choice is outside hypothesis' shrinking/replay — confirm intended.
    axis = int(np.random.randint(0, X.ndim, 1))
    permute_axes = np.arange(X.ndim)
    permute_axes[0] = axis
    permute_axes[axis] = 0
    X = np.transpose(X, permute_axes)

    return X, (scale, zp, axis, qparams[2])
@st.composite
def tensor_conv(
    draw, spatial_dim=2, batch_size_range=(1, 4),
    input_channels_per_group_range=(3, 7),
    output_channels_per_group_range=(3, 7), feature_map_range=(6, 12),
    kernel_range=(3, 7), max_groups=1, can_be_transposed=False,
    elements=None, qparams=None
):
    """Strategy for generating float32 test tensors for Conv arguments.

    Args:
        spatial_dim: Spatial rank of the feature maps. If iterable, one value
            is sampled from it.
        batch_size_range: (min, max) range for the batch size.
        input_channels_per_group_range / output_channels_per_group_range:
            (min, max) ranges for channels per group.
        feature_map_range: (min, max) for each spatial feature-map side.
        kernel_range: (min, max) for each spatial kernel side.
        max_groups: Maximum number of groups to generate.
        can_be_transposed: Also draw a bool `tr`; when it comes up True the
            weight is shaped for a transposed convolution.
        elements: Element strategy (defaults as in `tensor`).
        qparams: Either one qparams strategy (reused for X, w and b) or a
            list of three strategies for X, w, b.

    Generates:
        (X, W, b, groups, tr) where X, W, b are (array, qparams) pairs as
        produced by `tensor`, with drawn shapes
        X: (batch_size, input_channels, *feature_map_shape),
        W: (output_channels, input_channels_per_group, *kernels) — in/out
           channel dims arranged for transposed conv when `tr` is True,
        b: (output_channels,).
    """
    # Resolve the minibatch, in_channels, out_channels, iH/iW, iK/iW.
    batch_size = draw(st.integers(*batch_size_range))
    input_channels_per_group = draw(
        st.integers(*input_channels_per_group_range))
    output_channels_per_group = draw(
        st.integers(*output_channels_per_group_range))
    groups = draw(st.integers(1, max_groups))
    input_channels = input_channels_per_group * groups
    output_channels = output_channels_per_group * groups

    if isinstance(spatial_dim, Iterable):
        spatial_dim = draw(st.sampled_from(spatial_dim))

    feature_map_shape = [draw(st.integers(*feature_map_range))
                         for _ in range(spatial_dim)]
    kernels = [draw(st.integers(*kernel_range)) for _ in range(spatial_dim)]

    tr = False
    weight_shape = (output_channels, input_channels_per_group) + tuple(kernels)
    bias_shape = output_channels
    if can_be_transposed:
        tr = draw(st.booleans())
        if tr:
            weight_shape = (input_channels, output_channels_per_group) + tuple(kernels)
            bias_shape = output_channels

    # Resolve the tensors.
    if qparams is not None:
        if isinstance(qparams, (list, tuple)):
            assert len(qparams) == 3, "Need 3 qparams for X, w, b"
        else:
            qparams = [qparams] * 3
    # NOTE(review): when `qparams` is None, the subscripts below would raise;
    # callers appear to always pass qparams — confirm against call sites.

    X = draw(tensor(shapes=(
        (batch_size, input_channels) + tuple(feature_map_shape),),
        elements=elements, qparams=qparams[0]))
    W = draw(tensor(shapes=(weight_shape,), elements=elements,
                    qparams=qparams[1]))
    b = draw(tensor(shapes=(bias_shape,), elements=elements,
                    qparams=qparams[2]))

    return X, W, b, groups, tr

# We set the deadline in the currently loaded profile.
# Creating (and loading) a separate profile overrides any settings the user
# already specified.
hypothesis_version = hypothesis.version.__version_info__
current_settings = settings._profiles[settings._current_profile].__dict__
current_settings['deadline'] = None
if hypothesis_version >= (3, 16, 0) and hypothesis_version < (5, 0, 0):
    current_settings['timeout'] = hypothesis.unlimited


def assert_deadline_disabled():
    """Check that the hypothesis deadline was disabled (warn on old versions)."""
    if hypothesis_version < (3, 27, 0):
        import warnings
        warning_message = (
            "Your version of hypothesis is outdated. "
            "To avoid `DeadlineExceeded` errors, please update. "
            f"Current hypothesis version: {hypothesis.__version__}"
        )
        warnings.warn(warning_message)
    else:
        assert settings().deadline is None

# diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..1078a189f69c273ca0e1c3f59357fb72e272301c
# --- /dev/null
# +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py
# @@ -0,0 +1,84 @@

# mypy: ignore-errors

import torch
import re
import unittest
import functools
from subprocess import CalledProcessError
import torch._inductor.async_compile  # noqa: F401 required to warm up AsyncCompile pools
from torch._inductor.codecache import CppCodeCache
from torch.utils._triton import has_triton
from torch.testing._internal.common_utils import (
    LazyVal,
    IS_FBCODE,
)
from torch.testing._internal.common_utils import TestCase


def test_cpu():
    # Probe the C++ codecache once: if a trivial load fails, there is no
    # usable host toolchain. FBCODE builds report no CPU support either way.
    try:
        CppCodeCache.load("")
        return not IS_FBCODE
    except (
        CalledProcessError,
        OSError,
        torch._inductor.exc.InvalidCxxCompiler,
        torch._inductor.exc.CppCompileError,
    ):
        return False

HAS_CPU = LazyVal(test_cpu)

HAS_CUDA = torch.cuda.is_available() and has_triton()

HAS_XPU = torch.xpu.is_available() and has_triton()

HAS_GPU = HAS_CUDA or HAS_XPU

GPUS = ["cuda", "xpu"]

HAS_MULTIGPU = any(
    getattr(torch, gpu).is_available() and getattr(torch, gpu).device_count() >= 2
    for gpu in GPUS
)

# At most one GPU backend is expected to be present; default to "cuda".
tmp_gpus = [x for x in GPUS if getattr(torch, x).is_available()]
assert len(tmp_gpus) <= 1
GPU_TYPE = "cuda" if len(tmp_gpus) == 0 else tmp_gpus.pop()
del tmp_gpus
self.assertTrue( + has_dynamic, msg=f"Failed to find dynamic for loop variable\n{code}" + ) + self.assertTrue(for_loop_found, f"Failed to find for loop\n{code}") + + +def skipDeviceIf(cond, msg, *, device): + if cond: + def decorate_fn(fn): + def inner(self, *args, **kwargs): + if self.device == device: + raise unittest.SkipTest(msg) + return fn(self, *args, **kwargs) + return inner + else: + def decorate_fn(fn): + return fn + + return decorate_fn + +skipCUDAIf = functools.partial(skipDeviceIf, device="cuda") +skipXPUIf = functools.partial(skipDeviceIf, device="xpu") +skipCPUIf = functools.partial(skipDeviceIf, device="cpu") diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8171a95918939f2e34f352fe93ca5f0ab7e4bfa4 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py @@ -0,0 +1,722 @@ +# mypy: ignore-errors + +# Torch +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 +import torch.nn.functional as F +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +from torch.testing._internal.common_nn import module_tests, new_module_tests +from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like + +import collections +from copy import deepcopy +from typing import Any, Dict, List, Union +import math # noqa: F401 + +# Testing utils +from torch import inf + +assert torch.get_default_dtype() == torch.float32 + +L = 20 +M = 10 +S = 5 + + +def unpack_variables(args): + if isinstance(args, tuple): + return tuple(unpack_variables(elem) for elem in args) + else: + return args + +class dont_convert(tuple): + pass + +non_differentiable = collections.namedtuple('non_differentiable', ['tensor']) + +def 
create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None): + if not isinstance(call_args, tuple): + call_args = (call_args,) + + def map_arg(arg): + def maybe_non_contig(tensor): + if not non_contiguous or tensor.numel() < 2: + return tensor.clone() + + return noncontiguous_like(tensor) + + def conjugate(tensor): + return tensor.conj() + + if isinstance(arg, (torch.Size, dont_convert)): + return arg + elif isinstance(arg, tuple) and len(arg) == 0: + var = conjugate(torch.randn((), dtype=dtype, device=device)) + var.requires_grad = requires_grad + return var + elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor): + return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad) + # double check casting + elif isinstance(arg, non_differentiable): + if isinstance(arg.tensor, torch.Tensor): + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + elif isinstance(arg, torch.Tensor): + if arg.is_complex() != dtype.is_complex: + raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ", + "which is not supported for now") + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards + v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone() + v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex()) + return v + elif callable(arg): + return map_arg(arg(dtype=dtype, device=device)) + else: + return arg + args_out = tuple(map_arg(arg) for arg in call_args) + kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {} + return args_out, kwargs_out + +# NB: JIT script tests for all nn functional interfaces, script mode does +# not support in_place operations yet, so no inplace operation tests added. 
+# removed all the deprecated functions +# +# ( +# method name, +# input size/constructing fn, +# args (tuple represents shape of a tensor arg), +# test variant name(will be used at test name suffix, +# 'inplace' skips grad tests), // optional +# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional +# fn to determine if test should be skipped, // optional +# fn mapping output to part that should be gradcheck'ed, // optional +# kwargs for function, // optional +# ) +nn_functional_tests = [ + ('conv1d', (S, S, S), ((S, S, S),)), + ('conv2d', (S, S, S, S), ((S, S, S, S),)), + ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_transpose1d', (S, S, S), ((S, S, S),)), + ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)), + ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)), + ('avg_pool1d', (S, S, S), (3,)), + ('avg_pool2d', (S, S, S, S), (3,), '', (True,)), + ('avg_pool3d', (S, S, S, S, S), (3,)), + ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)), + ('max_pool1d', (S, S, S), (2, 1)), + ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'), + ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')), + ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')), + ('max_pool3d', (S, S, S, S, S), (2, 1)), + ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)), + ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)), + ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)), + ('lp_pool1d', (S, S, S), (2., 3, 2,)), + ('lp_pool2d', (S, S, S, S), (2., 3, 2,)), + ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)), + ('adaptive_max_pool1d', (S, S, S), (5,)), + ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)), + ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)), + ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)), 
+ ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)), + ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)), + ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')), + ('alpha_dropout', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S, S), (0.5,), 'batched'), + ('dropout3d', (S, S, S, S), (0.5,)), + ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'), + ('feature_alpha_dropout', (S, S, S), (0.5,)), + ('threshold', (S, S, S), (0.1, 2.), '', (True,)), + ('threshold', (S, S, S), (0.1, 2., True), 'inplace'), + ('relu', (S, S, S), (), '', (True,)), + ('relu', (S, S, S), (), 'inplace'), + ('glu', (S - 1, S - 1, S - 1), (),), + ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)), + ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'), + ('relu6', (S, S, S), (), '', (True,)), + ('relu6', (S, S, S), (True), 'inplace'), + ('elu', (S, S, S), (0.9,),), + ('elu', (S, S, S), (0.9, True), 'inplace'), + ('selu', (S, S, S), (),), + ('selu', (S, S, S), (True), 'inplace'), + ('celu', (S, S, S), (0.9,),), + ('celu', (S, S, S), (0.9, True), 'inplace'), + ('leaky_relu', (S, S, S), (0.02,), '', (True,)), + ('leaky_relu', (S, S, S), (0.02,), 'inplace'), + ('rrelu', (S, S), (0.1, 0.3, False),), + ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'), + ('hardshrink', (S, S, S), (0.4,), '', (True,)), + ('tanhshrink', (S, S, S), (),), + ('softsign', (S, S, S), (),), + ('softplus', (S, S, S), (), '', (True,)), + ('softmin', (S, S, S), (0,),), + ('softmax', (S, S, S), (0,), '', (True,)), + ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)), + ('tanh', (S, S, S), (), '', (True,)), + ('sigmoid', (S, S, S), (), '', (True,)), + ('silu', (S, S, S), (), '', (True,)), + ('log_softmax', (S, S, S), (0,), '', (True,)), + ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])), + ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])), + ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),), 
+ ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)), + ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ), + 'training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), True, ), + 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, True, ), + 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, None, False, ), + 'inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ), + 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')), + 
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), False, ), + 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, False, ), + 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')), + ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),), + ('layer_norm', (S, S, S, S), ([5],), '', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)), + non_differentiable(torch.rand(S))), 'with_weight_and_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])), + ('group_norm', (S, S, S), (1, torch.rand(5),),), + ('local_response_norm', (S, S, S), (2, ),), + ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'), + ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),), + ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),), + ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),), + ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('mse_loss', (3, 
S), (non_differentiable(torch.rand(3, S)),),), + ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('margin_ranking_loss', (S,), ((S,), (S,)),), + ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),), + ('pixel_shuffle', (1, 9, 4, 4), (3,),), + ('pixel_unshuffle', (1, 1, 12, 12), (3,),), + ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),), + ('pad', (3, 3, 4, 2), ([1, 1],),), + ('pairwise_distance', (S, S), ((S, S),),), + ('pdist', (S, S), (),), + ('cosine_similarity', (S, S), ((S, S),),), + ('triplet_margin_loss', (S, S), ((S, S), (S, S)),), + ('normalize', (S, S, S), (),), + ('unfold', (S, S, S, S), ([2, 3]),), + ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),), + ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),), + ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),), + ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)), + 1, 1., non_differentiable(torch.randn(S))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), + (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 
'size_average'), + ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(), + (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long), + torch.randint(1, S, (S,), dtype=torch.long))), + ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'), + ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 
'nearest_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False), + 'nearest_4d_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False), + 'nearest_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False), + 'bilinear_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False), + 'bilinear_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False), + 'bicubic_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False), + 'bicubic_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False), + 'nearest_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False), + 'nearest_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False), + 'linear_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False), + 'linear_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False), + 
'nearest_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False), + 'nearest_5d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False), + 'trilinear_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False), + 'trilinear_5d_with_size_not_recompute_scale_factor'), +] + +script_template = ''' +def the_method({}): + return {} +''' + +def value_to_literal(value): + if isinstance(value, str): + # Quotes string and escapes special characters + return ascii(value) + if isinstance(value, torch.Tensor): + return 'torch.' + str(value) + else: + return str(value) + +def get_call(method_name, func_type, args, kwargs): + kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()]) + self_arg = args[0] + if func_type == 'method': + args = args[1:] + + argument_str = ', '.join(args) + argument_str += ', ' if len(args) and len(kwargs) else '' + argument_str += kwargs_str + + if func_type == 'functional' or func_type == 'function': + call = f'torch.{method_name}({argument_str})' + elif func_type == 'method': + call = f'{self_arg}.{method_name}({argument_str})' + elif func_type == 'nn_functional': + call = f'torch.nn.functional.{method_name}({argument_str})' + else: + raise TypeError('Unsupported function type') + + return call + +def get_constant(x): + if x == inf: + return 'math.inf' + if x == -inf: + return '-math.inf' + return x + +def get_script_args(args): + formals: List[str] = [] + tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + actuals: List[str] = [] + for arg in args: + if isinstance(arg, torch.Tensor): + name = f'i{len(formals)}' + formals.append(name) + actuals.append(name) + tensors.append(arg) + elif is_iterable_of_tensors(arg): + name = f'i{len(formals)}' + formals.append(name + ': List[torch.Tensor]') + actuals.append(name) + 
# create a script function from (name, func_type, output_process_fn),
# and returns the compiled function and example inputs
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
    formals, tensors, actuals = get_script_args(args)
    call_expr = get_call(method_name, func_type, actuals, kwargs)
    source = script_template.format(', '.join(formals), call_expr)
    compilation_unit = torch.jit.CompilationUnit(source)
    return compilation_unit.the_method, tensors


# create a script function from (name, func_type),
# returns a function that takes (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
    # The closure returns a tuple containing original output and
    # filtered output to be used in checking gradients; it also records the
    # optimized graph on `script_fn.last_graph` after each call.
    def script_fn(*args, **kwargs):
        fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
        self.assertExportImport(fn.graph, tensors)
        output = fn(*tensors)
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        script_fn.last_graph = fn.graph_for(*tensors)  # type: ignore[attr-defined]
        return output
    return script_fn


class SplitInputs:
    """Partition of a call's (args, kwargs) into tensor / non-tensor parts."""
    all_tensors: List[Any]
    tensor_args: List[Any]
    nontensor_args: List[Any]
    arg_types: List[str]
    tensor_kwargs: Dict[str, Any]
    kwarg_order: List[str]
    nontensor_kwargs: Dict[str, Any]
    kwarg_types: Dict[str, Any]

    @staticmethod
    def _is_tensor_input(arg):
        return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)

    def __init__(self, args, kwargs):
        # 't' marks tensor-like slots, 's' marks static (non-tensor) slots.
        self.arg_types = ['t' if self._is_tensor_input(a) else 's' for a in args]
        self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's'
                            for k, v in kwargs.items()}
        self.tensor_args = [a for a in args if self._is_tensor_input(a)]
        self.nontensor_args = [a for a in args if not self._is_tensor_input(a)]
        self.tensor_kwargs = {k: v for k, v in kwargs.items()
                              if self._is_tensor_input(v)}
        self.nontensor_kwargs = {k: v for k, v in kwargs.items()
                                 if not self._is_tensor_input(v)}
        self.all_tensors = [*self.tensor_args, *self.tensor_kwargs.values()]
        self.kwarg_order = list(kwargs)

    def nontensors_match(self, other: 'SplitInputs'):
        # All static structure must agree for a cached trace to be reusable.
        return (
            self.arg_types == other.arg_types
            and self.kwarg_types == other.kwarg_types
            and self.kwarg_order == other.kwarg_order
            and self.nontensor_args == other.nontensor_args
            and self.nontensor_kwargs == other.nontensor_kwargs
        )


# make a new function where all non-tensor arguments in 'args' have been
# partially applied, and all tensor arguments remain.
# used to trace functions when some arguments are not tensors
def partial_apply_nontensors(fn, args, kwargs):
    inputs = SplitInputs(args, kwargs)

    def new_fn(*tensors_):
        feed = iter(tensors_)
        full_args = [args[i] if kind == 's' else next(feed)
                     for i, kind in enumerate(inputs.arg_types)]
        full_kwargs = {k: kwargs[k] if kind == 's' else next(feed)
                       for k, kind in inputs.kwarg_types.items()}
        return fn(*full_args, **full_kwargs)

    return new_fn, inputs


# create a trace function from input fn
def create_traced_fn(self, fn, cache_traced_fn=False):
    def traced_fn(*inputs, **kwargs):
        # `check_trace` is set to False because check_trace is run with @no_grad
        # Also, `check_against_reference` already does all the checks
        # against python function
        fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
        if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
            traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
            self.assertExportImport(traced.graph, split_inputs.all_tensors)
            output = traced(*split_inputs.all_tensors)
            if cache_traced_fn:
                traced_fn.traced = traced
                traced_fn.split_inputs = split_inputs
        else:
            # Guard to check that nontensor inputs are the same as during tracing
            self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
            output = traced_fn.traced(*split_inputs.all_tensors)
            traced = traced_fn.traced
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors)  # type: ignore[attr-defined]
        traced_fn.graph = traced.graph  # type: ignore[attr-defined]
        return output
    return traced_fn
+ traced_fn.split_inputs = split_inputs + else: + # Guard to check that nontensor inputs are the same as during tracing + self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs)) + output = traced_fn.traced(*split_inputs.all_tensors) + traced = traced_fn.traced + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined] + traced_fn.graph = traced.graph # type: ignore[attr-defined] + return output + return traced_fn + +# known to be failing in script +EXCLUDE_SCRIPT = { + 'test_norm_fro_default', + 'test_norm_fro_cpu', + 'test_norm_nuc', + 'test_norm_fro', + 'test_norm_nuc_batched', + + # aten op has additional cudnn argument + 'test_nn_unfold', + + # flaky test - TODO fix + 'test_nn_ctc_loss', + + # unknown builtin op + 'test_nn_fold', + + # jit doesn't support sparse tensors. + 'test_to_sparse', + 'test_to_sparse_dim', +} + +# generates a script function and set of example inputs +# from a specified test in the format of nn_functional_tests +def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args): + test_name = 'test_nn_' + name + + if variant_name != '': + test_name = test_name + '_' + variant_name + + no_grad = variant_name == 'inplace' + + self_variable = create_input((self_size,))[0][0] + kwargs = None + + # need to record this because methods can change the size (e.g. 
unsqueeze) + args_variable, kwargs_variable = create_input(args) + + self_tensor = deepcopy(self_variable.data) + args_tensor = deepcopy(unpack_variables(args_variable)) + + f_args_variable = (self_variable,) + args_variable + f_args_tensor = (self_tensor,) + args_tensor + with torch._jit_internal._disable_emit_hooks(): + script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable) + return script_fn, inputs + + +# additional modules test +# TODO: delete this list once we make all nn_tests work +additional_module_tests = [ + { + 'module_name': 'Bilinear', + 'constructor_args': (S, S, M), + 'input_size': (S, S), + 'extra_args': ((S, S),) + }, + { + 'module_name': 'RNNCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'LSTMCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'GRUCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'MultiheadAttention', + 'constructor_args': (128, 8), + 'input_size': (10, 8, 128), + 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)), + 'slowTest': True + }, + { + 'module_name': 'Transformer', + 'constructor_args': (1, 1, 1, 1, 2), + 'input_size': (3, 1, 1), + 'extra_args': (torch.randn(1, 1, 1),), + 'slowTest': True + } +] + +EXCLUDE_SCRIPT_MODULES = { + 'test_nn_AdaptiveAvgPool2d_tuple_none', + 'test_nn_AdaptiveAvgPool3d_tuple_none', + 'test_nn_AdaptiveMaxPool2d_tuple_none', + 'test_nn_AdaptiveMaxPool3d_tuple_none', + + # Doesn't use future division, so this is not supported + 'test_nn_CrossMapLRN2d', + # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented + 'test_nn_TransformerDecoderLayer_gelu_activation', + 'test_nn_TransformerDecoderLayer_relu_activation', + 'test_nn_TransformerEncoderLayer_gelu_activation', + 'test_nn_TransformerEncoderLayer_relu_activation', + 'test_nn_Transformer_multilayer_coder', +} + +script_method_template = ''' +def forward({}): + 
return {} +''' + +def create_script_module(self, nn_module, constructor_args, *args, **kwargs): + def script_module(*args, **kwargs): + formals, tensors, actuals = get_script_args(args) + + method_args = ', '.join(['self'] + actuals) + call_args_str = ', '.join(actuals) + call = f"self.submodule({call_args_str})" + script = script_method_template.format(method_args, call) + + submodule_constants = [] + if kwargs.get('is_constant'): + submodule_constants = ['submodule'] + + # Create module to use the script method + class TheModule(torch.jit.ScriptModule): + __constants__ = submodule_constants + + def __init__(self): + super().__init__() + self.submodule = nn_module(*constructor_args) + + def make_module(script): + module = TheModule() + # check __repr__ + str(module) + module.define(script) + return module + + module = make_module(script) + if self: + self.assertExportImportModule(module, tensors) + module(*args) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + create_script_module.last_graph = module.graph # type: ignore[attr-defined] + return module + return script_module + +def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + # to clean up IR + torch._C._jit_pass_inline(CU.the_method.graph) + torch._C._jit_pass_constant_propagation(CU.the_method.graph) + torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name) + +def get_nn_module_name_from_kwargs(**kwargs): + if 'module_name' in kwargs: + return kwargs['module_name'] + elif 'fullname' in kwargs: + return kwargs['fullname'] + elif 'constructor' in kwargs: + return kwargs['constructor'].__name__ + +def get_nn_mod_test_name(**kwargs): + if 'fullname' in kwargs: + test_name = 
kwargs['fullname'] + else: + test_name = get_nn_module_name_from_kwargs(**kwargs) + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + return f'test_nn_{test_name}' + +def get_nn_module_class_from_kwargs(**kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + index = name.find("_") + if index == -1: + return name + else: + return name[0:name.find("_")] + +def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + + if 'desc' in kwargs and 'eval' in kwargs['desc']: + # eval() is not supported, so skip these tests + return + + test_name = name + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + test_name = get_nn_mod_test_name(**kwargs) + + if test_name in EXCLUDE_SCRIPT_MODULES: + return + if 'constructor' in kwargs: + nn_module = kwargs['constructor'] + else: + nn_module = getattr(torch.nn, name) + + if "FunctionalModule" in str(nn_module): + return + + if 'constructor_args_fn' in kwargs: + constructor_args = kwargs['constructor_args_fn']() + else: + constructor_args = kwargs.get('constructor_args', ()) + + # Set up inputs from tuple of sizes or constructor fn + input_dtype = torch.double + if 'input_fn' in kwargs: + input = kwargs['input_fn']() + if isinstance(input, torch.Tensor): + input = (input,) + + if all(tensor.is_complex() for tensor in input): + input_dtype = torch.cdouble + else: + input = (kwargs['input_size'],) + + # Extra parameters to forward() + if 'extra_args' in kwargs: + input = input + kwargs['extra_args'] + + if 'target_size' in kwargs: + input = input + (kwargs['target_size'],) + elif 'target_fn' in kwargs: + if torch.is_tensor(input): + input = (input,) + input = input + (kwargs['target_fn'](),) + + args_variable, kwargs_variable = create_input(input, dtype=input_dtype) + f_args_variable = deepcopy(unpack_variables(args_variable)) + out_var = deepcopy(f_args_variable) + + args, mod = f_args_variable, create_script_module(None, nn_module, 
constructor_args, *f_args_variable)(*f_args_variable) + + return mod, out_var + + +def get_all_nn_module_tests(): + return module_tests + new_module_tests + additional_module_tests diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c0109ecacf7f7d3fd6a1849cdf127b7f41a49578 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py @@ -0,0 +1,893 @@ +# mypy: ignore-errors + +# Torch +from torch.autograd import Variable +from torch.autograd.function import _nested_map +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 + +from torch.onnx import OperatorExportTypes +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized +import zipfile +import functools + +# Testing utils +from torch.testing import FileCheck +from torch.testing._internal.common_utils import IS_WINDOWS, \ + freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \ + is_iterable_of_tensors +from torch.testing._internal.common_jit import JitCommonTestCase +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from contextlib import contextmanager +from functools import reduce +from io import StringIO +from collections import defaultdict + +import importlib.util +import inspect +import io +import math +import os +import pickle +import sys +import tempfile +import textwrap +from importlib.abc import Loader +from typing import Any, Dict, List, Tuple, Union + +RUN_CUDA = torch.cuda.is_available() +RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1 +RUN_CUDA_HALF = RUN_CUDA +# HIP supports half, no version check necessary +if torch.cuda.is_available() and not torch.version.hip: + CUDA_VERSION = 
torch._C._cuda_getCompiledVersion() + for d in range(torch.cuda.device_count()): + major = torch.cuda.get_device_capability(d)[0] + if (major < 6): + RUN_CUDA_HALF = False + +def execWrapper(code, glob, loc): + exec(code, glob, loc) + +def do_input_map(fn, input): + return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input) + +def clear_class_registry(): + torch._C._jit_clear_class_registry() + torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() + torch.jit._state._clear_class_state() + +def get_execution_plan(graph_executor_state): + execution_plans = list(graph_executor_state.execution_plans.values()) + num_plans = len(execution_plans) + if num_plans != 1: + raise RuntimeError('This test assumes this GraphExecutor should ' + f'only have one execution plan, got: {num_plans}') + return execution_plans[0] + +class _AssertRaisesRegexWithHighlightContext: + """ + A context manager that is useful for checking that error messages highlight + the correct part of the source code. 
+ """ + + def __init__(self, test_case, exception, regex, highlight): + self.test_case = test_case + self.exception_type = exception + self.regex = regex + self.highlight = highlight + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + with self.test_case.assertRaisesRegex(self.exception_type, self.regex): + if type: + raise value + + if self.highlight: + FileCheck().check_source_highlighted(self.highlight).run(str(value)) + + return True + +FUSION_GROUP = "prim::TensorExprGroup" + +class JitTestCase(JitCommonTestCase): + _do_cuda_memory_leak_check = True + _restored_warnings = False + + class capture_stdout(list): + """ + Replace sys.stdout with a temporary StringIO + """ + def __enter__(self): + self.sys_stdout = sys.stdout + self.stringio = StringIO() + sys.stdout = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stdout = self.sys_stdout + + class capture_stderr(list): + """ + Replace sys.stderr with a temporary StringIO + """ + def __enter__(self): + self.sys_stderr = sys.stderr + self.stringio = StringIO() + sys.stderr = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stderr = self.sys_stderr + + def setHooks(self): + torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook) + + def clearHooks(self): + torch._C._jit_set_emit_hooks(None, None) + + def setUp(self): + super().setUp() + # unittest overrides all warning filters and forces all of them to show up + # after we install our own to silence those coming from inside PyTorch. + # This will ensure that our filter still takes precedence. 
+ if not JitTestCase._restored_warnings: + torch.jit.TracerWarning.ignore_lib_warnings() + JitTestCase._restored_warnings = True + self.setHooks() + + def tearDown(self): + super().tearDown() + # needs to be cleared because python might be unloaded before + # the callback gets destructed + self.clearHooks() + clear_class_registry() + + def assertAllFused(self, graph, except_for=()): + + # note this helper collects nodes on 'fast path' only + # i.e. the true blocks of specialized checks + def get_nodes_and_parents_recursively(block, kind, acc): + for node in block.nodes(): + if node.kind() == kind: + acc[block].append(node) + elif node.kind() == 'prim::DifferentiableGraph': + get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc) + elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or + node.inputs().__next__().node().kind() == 'prim::TypeCheck' or + node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'): + get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc) + else: + for inner_block in node.blocks(): + get_nodes_and_parents_recursively(inner_block, kind, acc) + + allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate', + 'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for) + + fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list) + get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups) + self.assertTrue(len(fusion_groups) == 1, f'got {graph}') + (graph, fusion_nodes) = next(iter(fusion_groups.items())) + # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes` + self.assertTrue(len(fusion_nodes) == 1, f'got {graph}') + self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()), + f'got {graph}') + + def _isHookExceptionOk(self, e): + se = str(e) + allowed = ("Could not export Python function", + "closures are not exportable") + for a in 
allowed: + if a in se: + return True + return False + + def _compared_saved_loaded(self, m): + def extract_files(buffer): + # crack open the zip format to get at the main module code + archive = zipfile.ZipFile(buffer) + # check that we have no duplicate names + self.assertEqual(len(set(archive.namelist())), len(archive.namelist())) + files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist())) + # unwrap all the code files into strings + code_files_str = filter(lambda x: x.endswith('.py'), files) + code_files_stream = (archive.open(f) for f in code_files_str) + code_files = ("".join([line.decode() for line in file]) for file in code_files_stream) + + # unpickled all the debug files + debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files) + debug_files_stream = (archive.open(f) for f in debug_files_str) + debug_files = (pickle.load(f) for f in debug_files_stream) + return code_files, debug_files + + # disable the hook while we parse code, otherwise we will re-enter the hook + with torch._jit_internal._disable_emit_hooks(): + try: + # short-circuit if this is an empty function or module + if len(m.code) == 0: + return + if isinstance(m, torch._C.ScriptModule): + if len(m._method_names()) == 0: + return + + # save the module to a buffer + buffer = io.BytesIO() + torch.jit.save(m, buffer) + # copy the data in the buffer so we can restore it later. This + # is because py2 and py3 have different semantics with zipfile + # and it's easier to just work with a fresh copy each time. 
+ buffer_copy = buffer.getvalue() + + code_files, debug_files = extract_files(buffer) + + except RuntimeError as e: + if not self._isHookExceptionOk(e): + raise + else: + return + + # import the model again (from a the copy we made of the original) + buffer2 = io.BytesIO(buffer_copy) + imported = torch.jit.load(buffer2) + + # save it again + saved_module_buffer_2 = io.BytesIO() + torch.jit.save(imported, saved_module_buffer_2) + + saved_module_buffer_2.seek(0) + code_files_2, debug_files_2 = extract_files(saved_module_buffer_2) + + for a, b in zip(code_files, code_files_2): + self.assertMultiLineEqual(a, b) + + if isinstance(m, torch._C.ScriptModule): + self.assertTrue(torch._C._ivalue_tags_match(m, imported._c)) + + + def emitFunctionHook(self, func): + # func has invalid names for export, skip the jitter check + if func.name == "" or "aten::" in func.name: + return + self._compared_saved_loaded(func) + + def emitModuleHook(self, module): + self._compared_saved_loaded(module) + + + def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None) + torch.jit.save(m, buffer) + m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + + if not also_test_file: + return imported + + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. 
To support Windows, + # close the file after creation and try to remove it manually + f = tempfile.NamedTemporaryFile(delete=False) + try: + f.close() + imported.save(f.name) + result = torch.jit.load(f.name, map_location=map_location) + finally: + os.unlink(f.name) + + result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + return result + + def assertGraphContains(self, graph, kind, consider_subgraphs=False): + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + self.assertTrue(count > 0) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + self.assertTrue(len(out_nodes) > 0) + + def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False): + def perform_assert(graph, kind, actual, expected, consider_subgraphs): + if actual == expected: + return + subgraph = 'including' if consider_subgraphs else 'excluding' + raise AssertionError( + f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}') + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + perform_assert(graph, kind, count, num_kind_nodes, + consider_subgraphs) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + perform_assert(graph, kind, len(out_nodes), num_kind_nodes, + consider_subgraphs) + + def assertExpectedONNXGraph(self, g, *args, **kwargs): + g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX) + self.assertExpectedGraph(g, *args, **kwargs) + + def assertExpectedGraph(self, trace, *args, **kwargs): + if isinstance(trace, torch._C.Graph): 
+ graph = trace + else: + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_dce(graph) + torch._C._jit_pass_lint(graph) + graph = torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + self.assertExpected(str(graph), *args, **kwargs) + + def run_pass(self, name, trace): + if isinstance(trace, torch._C.Graph): + graph = trace + set_graph = False + else: + set_graph = True + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + result = getattr(torch._C, '_jit_pass_' + name)(graph) + if result is not None and not isinstance(result, bool): + graph = result + torch._C._jit_pass_lint(graph) + + if set_graph: + trace.set_graph(graph) + return graph + + def get_frame_vars(self, frames_up): + frame = inspect.currentframe() + if not frame: + raise RuntimeError("failed to inspect frame") + i = 0 + while i < frames_up + 1: + frame = frame.f_back + if not frame: + raise RuntimeError("failed to get frame") + i += 1 + defined_vars: Dict[str, Any] = {} + defined_vars.update(frame.f_locals) + defined_vars.update(frame.f_globals) + return defined_vars + + def assertRaisesRegexWithHighlight(self, exception, regex, highlight): + return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight) + + def checkScriptRaisesRegex(self, script, inputs, exception, regex, + name=None, outputs=None, capture_output=False, + frames_up=1, profiling=ProfilingMode.PROFILING): + """ + Checks that a given function will throw the correct exception, + when executed with normal python, the string frontend, and the + AST frontend. 
Logic taken from `checkScript` (see comments there + for details) + """ + with enable_profiling_mode_for_profiling_tests(): + # Normal Python + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + else: + python_fn = script + + python_fn(*inputs) + + # String frontend + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + string_frontend = getattr(cu, name) + else: + source = textwrap.dedent(inspect.getsource(script)) + cu = torch.jit.CompilationUnit(source, _frames_up=frames_up) + string_frontend = getattr(cu, script.__name__) + + string_frontend(*inputs) + + # Python AST frontend + if not isinstance(script, str): + with self.assertRaisesRegex(exception, regex): + ge = torch.jit.script(python_fn) + ge(*inputs) + + def checkBailouts(self, model, inputs, expected): + state = model.get_debug_state() + plan = get_execution_plan(state) + num_bailouts = plan.code.num_bailouts() + for i in range(0, num_bailouts): + plan.code.request_bailout(i) + bailout_outputs = model(*inputs) + self.assertEqual(bailout_outputs, expected) + + def checkScript(self, + script, + inputs, + name='func', + optimize=True, + inputs_requires_grad=False, + capture_output=False, + frames_up=1, + profiling=ProfilingMode.PROFILING, + atol=None, + rtol=None): + """ + Checks that a given script generates the same output as the Python + version using the given inputs. 
+ """ + with torch.jit.optimized_execution(optimize): + with enable_profiling_mode_for_profiling_tests(): + extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs) + if isinstance(script, str): + # Compile the string to a Script function + # with enable_profiling_mode(): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + + # Execute the Python function so we can run it later and get its + # outputs + + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + scripted_fn = getattr(cu, name) + else: + + # Check the string frontend first + source = textwrap.dedent(inspect.getsource(script)) + self.checkScript( + source, + inputs, + script.__name__, + optimize=optimize, + inputs_requires_grad=inputs_requires_grad, + capture_output=capture_output, + profiling=profiling, + frames_up=2) + + # Continue checking the Python frontend + scripted_fn = torch.jit.script(script, _frames_up=1) + python_fn = script + + if inputs_requires_grad: + recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs) + else: + recording_inputs = inputs + + if capture_output: + with self.capture_stdout() as script_stdout: + script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as opt_script_stdout: + opt_script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as _python_stdout: + python_outputs = python_fn(*inputs) + if not IS_WINDOWS: + self.assertExpected(script_stdout[0], subname='stdout') + self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol) + else: + # profiling run + script_outputs = scripted_fn(*recording_inputs) + if inputs_requires_grad or extra_profile_runs: + opt_script_outputs = scripted_fn(*recording_inputs) + # optimized run + opt_script_outputs = scripted_fn(*recording_inputs) + if TEST_BAILOUTS: + self.checkBailouts(scripted_fn, 
inputs, opt_script_outputs) + python_outputs = python_fn(*inputs) + self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol) + self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol) + return scripted_fn + + def checkTrace(self, func, reference_tensors, input_tensors=None, + drop=None, allow_unused=False, verbose=False, + inputs_require_grads=True, check_tolerance=1e-5, export_import=True, + _force_outplace=False, grad_atol=None, grad_rtol=None): + + # TODO: check gradients for parameters, not just inputs + def allSum(vs): + # drop allows us to remove some values from ever being used + # to test unused outputs + if drop is not None: + vs = vs[:-drop] + # we don't want all the grad for all the outputs to be the same + # so we multiply each by a constant + return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None) + if input_tensors is None: + input_tensors = reference_tensors + + def flatten_inputs(inputs): + def input_reduce(input, fn, acc): + if isinstance(input, torch.Tensor): + fn(input, acc) + elif isinstance(input, dict): + reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc) + else: + reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc) + return acc + return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), [])) + + nograd_inputs = reference_tensors + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + else: + recording_inputs = reference_tensors + + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `checkTrace` already does all the checks + # against python function + ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance, + _force_outplace=_force_outplace, check_trace=False) + + if export_import: + ge = self.getExportImportCopy(ge) + + if verbose: + print(ge.graph) + + # test no 
gradients case + outputs = func(*nograd_inputs) + outputs_ge = ge(*nograd_inputs) + self.assertEqual(outputs, outputs_ge) + + # test gradients case + outputs = func(*recording_inputs) + if inputs_require_grads: + grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs, + allow_unused=allow_unused) + + outputs_ge = ge(*recording_inputs) + if inputs_require_grads: + grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + + # test the grad grad case + outputs = func(*recording_inputs) + l1 = allSum(outputs) + if inputs_require_grads: + grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True, + allow_unused=allow_unused) + if inputs_require_grads: + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused) + + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + + outputs_ge = ge(*recording_inputs) + l1_ge = allSum(outputs_ge) + if inputs_require_grads: + grads_ge = torch.autograd.grad( + l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused) + + if inputs_require_grads: + l2_ge = (allSum(grads_ge) * l1_ge) + grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + for g2, g2_ge in zip(grads2, grads2_ge): + if g2 is None and g2_ge is None: + continue + self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4) + + return ge + + def checkModule(self, nn_module, args): + """ + Check that a nn.Module's results in Script mode match eager and that it + can be exported + """ 
+ sm = torch.jit.script(nn_module) + + with freeze_rng_state(): + eager_out = nn_module(*args) + + with freeze_rng_state(): + script_out = sm(*args) + + self.assertEqual(eager_out, script_out) + self.assertExportImportModule(sm, args) + + return sm + +class NoTracerWarnContextManager: + def __enter__(self): + self.prev = torch._C._jit_get_tracer_state_warn() + torch._C._jit_set_tracer_state_warn(False) + + def __exit__(self, *args): + torch._C._jit_set_tracer_state_warn(self.prev) + +@contextmanager +def inline_everything_mode(should_inline): + old = torch._C._jit_get_inline_everything_mode() + torch._C._jit_set_inline_everything_mode(should_inline) + try: + yield + finally: + torch._C._jit_set_inline_everything_mode(old) + +@contextmanager +def set_fusion_group_inlining(inlining): + old = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(inlining) + try: + yield + finally: + torch._C._debug_set_fusion_group_inlining(old) + +# note: not re-entrant, use unnested only +@contextmanager +def disable_autodiff_subgraph_inlining(enabled=True): + torch._C._debug_set_autodiff_subgraph_inlining(not enabled) + try: + yield + finally: + torch._C._debug_set_autodiff_subgraph_inlining(True) + +def _inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(True): + fn(*args, **kwargs) + return wrapper + +# this exists for forward compatibility reasons temporarily. 
+# TODO(suo) remove +def _tmp_donotuse_dont_inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(False): + fn(*args, **kwargs) + return wrapper + +# make it easy to quicky define/trace a function for these tests +def _trace(*args, **kwargs): + def wrapper(func): + return torch.jit.trace(func, args, **kwargs) + return wrapper + + +def enable_cpu_fuser(fn): + def wrapper(*args, **kwargs): + torch._C._jit_override_can_fuse_on_cpu_legacy(True) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_set_te_must_use_llvm_cpu(False) + try: + fn(*args, **kwargs) + finally: + torch._C._jit_override_can_fuse_on_cpu_legacy(False) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_set_te_must_use_llvm_cpu(True) + return wrapper + + +def enable_cpu_fuser_if(cond): + if cond: + return enable_cpu_fuser + else: + def noop_fuser(fn): + def wrapper(*args, **kwargs): + return fn(*args, **kwargs) + return wrapper + return noop_fuser + +def get_forward(c): + return c._get_method('forward') + +def get_forward_graph(c): + return c._get_method('forward').graph + +def get_module_method(m, module, method): + return m._c.getattr(module)._get_method(method) + +def attrs_with_prefix(module, prefix): + return [x for x, _ in module._modules._c.items() + if x.startswith(prefix)] + +def warmup_backward(f, *args): + profiling_count = 3 + results = [] + for i in range(profiling_count): + if len(args) > 0: + r = torch.autograd.grad(f, *args) + results.append(r) + else: + f.backward(retain_graph=True) + + return results + +# TODO: Remove me once https://bugs.python.org/issue42666 is resolved +def make_global(*args): + for arg in args: + setattr(sys.modules[arg.__module__], arg.__name__, arg) + +# Helper function to eval Python3 code without causing a syntax error for +# this file under py2 +def _get_py3_code(code, fn_name): + with tempfile.TemporaryDirectory() as tmp_dir: + script_path = os.path.join(tmp_dir, 'script.py') + 
def clone_inputs(args):
    """Return a list mirroring *args* with every tensor detached and cloned.

    Tensors become detached clones, iterables of tensors become lists of
    detached clones, and everything else is passed through unchanged.
    """
    def _clone(entry):
        if isinstance(entry, torch.Tensor):
            return entry.detach().clone()
        if is_iterable_of_tensors(entry):
            return [item.detach().clone() for item in entry]
        return entry

    cloned: List[Union[torch.Tensor, List[torch.Tensor]]] = [_clone(a) for a in args]
    return cloned
# types.LambdaType gave false positives
def is_lambda(lamb):
    """Return True iff *lamb* is a ``lambda`` expression.

    ``types.LambdaType`` is the same object as ``FunctionType``, so we
    instead compare against a reference lambda's type AND its ``<lambda>``
    name, which ordinary ``def`` functions never carry.
    """
    reference = lambda: 0  # noqa: E731
    if not isinstance(lamb, type(reference)):
        return False
    return lamb.__name__ == reference.__name__
a/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..8b7faf45b3c3ceeb9a9eea55c4db6b3aad05f651 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py @@ -0,0 +1,182 @@ +# mypy: ignore-errors + +import torch +from torch.utils._pytree import tree_map +from typing import Iterator, List, Optional +import logging +import contextlib +import itertools +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.weak import WeakTensorKeyDictionary +import functools +from torch._C._profiler import gather_traceback, symbolize_tracebacks + +logger = logging.getLogger("LoggingTensor") + +_dtype_abbrs = { + torch.bfloat16: "bf16", + torch.float64: "f64", + torch.float32: "f32", + torch.float16: "f16", + torch.complex32: "c32", + torch.complex64: "c64", + torch.complex128: "c128", + torch.int8: "i8", + torch.int16: "i16", + torch.int32: "i32", + torch.int64: "i64", + torch.bool: "b8", + torch.uint8: "u8", +} + +# How the chain of calls works for LoggingTensor: +# 1. Call torch.sin +# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely +# 3. Enter dispatcher, wind your way through Autograd +# 4. Hit Python dispatch key, call __torch_dispatch__ + +# This Tensor can work with autograd in two ways: +# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor +# can require gradients if the user asks for it as a constructor kwarg. +# - The wrapped Tensor can require gradients. In that case autograd will be tracked +# for the wrapped Tensor and the LoggingTensor itself cannot require gradients. +# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single +# test or you might get surprising behavior. 
# TODO: TensorBase should work
class LoggingTensor(torch.Tensor):
    """Wrapper tensor subclass that logs every ``__torch_dispatch__`` call.

    The real data lives in ``elem``; the wrapper advertises the same
    size/stride/dtype/device but holds no storage of its own.
    """

    elem: torch.Tensor

    __slots__ = ['elem']

    context = contextlib.nullcontext

    @staticmethod
    def __new__(cls, elem, *args, **kwargs):
        # The wrapping tensor (LoggingTensor) shouldn't hold any
        # memory for the class in question, but it should still
        # advertise the same device as before
        r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls, elem.size(),
            strides=elem.stride(), storage_offset=elem.storage_offset(),
            # TODO: clone storage aliasing
            dtype=elem.dtype, layout=elem.layout,
            device=elem.device, requires_grad=kwargs.get("requires_grad", False)
        )
        # ...the real tensor is held as an element on the tensor.
        r.elem = elem.detach() if r.requires_grad else elem
        return r

    def __repr__(self):
        return super().__repr__(tensor_contents=f"{self.elem}")

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        # Guard against kwargs=None (the signature default), matching
        # LoggingTensorMode below: tree-mapping None and then `**`-splatting
        # the result would raise a TypeError.
        if kwargs is None:
            kwargs = {}

        def unwrap(e):
            # Peel the wrapper off our own instances; leave everything else.
            return e.elem if isinstance(e, cls) else e

        def wrap(e):
            # Re-wrap plain tensor results so chains of ops stay logged.
            return cls(e) if isinstance(e, torch.Tensor) else e

        with cls.context():
            rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
        logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)  # noqa: G004
        return rs
None: + logging.Handler.__init__(self) + self.log_list = log_list + self.use_shortid_for_all_tensors = use_shortid_for_all_tensors + self.tracebacks_list = tracebacks_list + self.memo = WeakTensorKeyDictionary() + self.next_id = 0 + self.with_type = with_type + + def _shortid(self, t: torch.Tensor) -> int: + if t not in self.memo: + self.memo[t] = self.next_id + self.next_id += 1 + return self.memo[t] + + def _fmt(self, a: object, with_type: bool = False) -> str: + cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor + if isinstance(a, cond_cls): + maybe_type = "" + if with_type and self.with_type: + maybe_type = f": {_dtype_abbrs[a.dtype]}[{', '.join(map(str, a.shape))}]" + x = f"${self._shortid(a)}{maybe_type}" + return x + else: + return repr(a) + + def emit(self, record): + fmt_args = ", ".join( + itertools.chain( + (str(tree_map(self._fmt, a)) for a in record.args[0]), + (f"{k}={str(tree_map(self._fmt, v))}" for k, v in record.args[1].items()), + ) + ) + fmt_rets = tree_map(functools.partial(self._fmt, with_type=True), record.args[2]) + self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})') + if self.tracebacks_list is not None: + self.tracebacks_list.append(record.traceback) + +def log_input(name: str, var: object) -> None: + logger.info("input", (name,), {}, var) # noqa: PLE1205 + +class GatherTraceback(logging.Filter): + def __init__(self, python=True, script=True, cpp=False): + self.python = python + self.script = script + self.cpp = cpp + + def filter(self, record): + record.traceback = gather_traceback(python=self.python, script=self.script, cpp=self.cpp) + return True + +@contextlib.contextmanager +def capture_logs(is_mode=False, python_tb=False, script_tb=False, cpp_tb=False) -> Iterator[List[str]]: + collect_traceback = python_tb or script_tb or cpp_tb + log_list: List[str] = [] + tracebacks_list: List[str] = [] + handler = LoggingTensorHandler( + log_list, + with_type=True, + use_shortid_for_all_tensors=is_mode, + 
tracebacks_list=tracebacks_list if collect_traceback else None + ) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + logger.propagate = False + if collect_traceback: + logger.addFilter(GatherTraceback(python=python_tb, script=script_tb, cpp=cpp_tb)) + try: + if collect_traceback: + yield log_list, tracebacks_list + else: + yield log_list + finally: + symbolized_tracebacks = symbolize_tracebacks(tracebacks_list) + tracebacks_list.clear() + tracebacks_list.extend(symbolized_tracebacks) + logger.removeHandler(handler) + +@contextlib.contextmanager +def capture_logs_with_logging_tensor_mode(python_tb=False, script_tb=False, cpp_tb=False): + with LoggingTensorMode(), capture_logs(True, python_tb, script_tb, cpp_tb) as logs: + yield logs diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f97d0281b139b1115fb9f765782e4d641dc7eda5 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py @@ -0,0 +1,213 @@ +# mypy: ignore-errors + +import torch._dynamo.test_case +import unittest.mock +import os +import contextlib +import torch._logging +import torch._logging._internal +from torch._dynamo.utils import LazyString +from torch._inductor import config as inductor_config +import logging +import io + +@contextlib.contextmanager +def preserve_log_state(): + prev_state = torch._logging._internal._get_log_state() + torch._logging._internal._set_log_state(torch._logging._internal.LogState()) + try: + yield + finally: + torch._logging._internal._set_log_state(prev_state) + torch._logging._internal._init_logs() + +def log_settings(settings): + exit_stack = contextlib.ExitStack() + settings_patch = unittest.mock.patch.dict(os.environ, {"TORCH_LOGS": settings}) + exit_stack.enter_context(preserve_log_state()) + exit_stack.enter_context(settings_patch) + 
def kwargs_to_settings(**kwargs):
    """Translate ``torch._logging.set_logs``-style kwargs into a TORCH_LOGS string.

    ``True`` enables a log by bare name; an int level maps through
    DEBUG -> "+name", INFO -> "name", ERROR -> "-name"; a ``modules`` dict maps
    qualified module names to levels. Anything else raises ValueError.
    """
    int_to_verbosity = {10: "+", 20: "", 40: "-"}

    settings = []

    def append_setting(name, level):
        # Guard clause: reject anything that is not (str name, known int level).
        if not (isinstance(name, str) and isinstance(level, int) and level in int_to_verbosity):
            raise ValueError("Invalid value for setting")
        settings.append(int_to_verbosity[level] + name)

    for name, val in kwargs.items():
        if isinstance(val, bool):
            settings.append(name)
        elif isinstance(val, int):
            append_setting(name, val)
        elif isinstance(val, dict) and name == "modules":
            for module_qname, level in val.items():
                append_setting(module_qname, level)
        else:
            raise ValueError("Invalid value for setting")

    return ",".join(settings)
def make_settings_test(settings):
    """Decorator factory: run the wrapped case under TORCH_LOGS=*settings*.

    The produced test resets dynamo, applies the env-var based settings,
    patches the pt2 log handlers, and hands the gathered records to *fn*.
    """
    def wrapper(fn):
        def test_fn(self):
            torch._dynamo.reset()
            gathered = []
            # run with env var
            with log_settings(settings), self._handler_watcher(gathered):
                fn(self, gathered)

        return test_fn

    return wrapper
def _handler_watcher(self, record_list): + exit_stack = contextlib.ExitStack() + + def emit_post_hook(record): + nonlocal record_list + record_list.append(record) + + # registered logs are the only ones with handlers, so patch those + for log_qname in torch._logging._internal.log_registry.get_log_qnames(): + logger = logging.getLogger(log_qname) + num_handlers = len(logger.handlers) + self.assertLessEqual( + num_handlers, + 2, + "All pt2 loggers should only have at most two handlers (debug artifacts and messages above debug level).", + ) + + self.assertGreater(num_handlers, 0, "All pt2 loggers should have more than zero handlers") + + for handler in logger.handlers: + old_emit = handler.emit + + def new_emit(record): + old_emit(record) + emit_post_hook(record) + + exit_stack.enter_context( + unittest.mock.patch.object(handler, "emit", new_emit) + ) + + return exit_stack + + +def logs_to_string(module, log_option): + """Example: + logs_to_string("torch._inductor.compile_fx", "post_grad_graphs") + returns the output of TORCH_LOGS="post_grad_graphs" from the + torch._inductor.compile_fx module. 
+ """ + log_stream = io.StringIO() + handler = logging.StreamHandler(stream=log_stream) + + @contextlib.contextmanager + def tmp_redirect_logs(): + try: + logger = torch._logging.getArtifactLogger(module, log_option) + logger.addHandler(handler) + yield + finally: + logger.removeHandler(handler) + + def ctx_manager(): + exit_stack = log_settings(log_option) + exit_stack.enter_context(tmp_redirect_logs()) + return exit_stack + + return log_stream, ctx_manager diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/static_module.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/static_module.py new file mode 100644 index 0000000000000000000000000000000000000000..0a031b0d8f6e685517b7ac51c236e23835501cd9 --- /dev/null +++ b/valley/lib/python3.10/site-packages/torch/testing/_internal/static_module.py @@ -0,0 +1,27 @@ +# mypy: allow-untyped-defs +# Owner(s): ["module: unknown"] + +import torch + + +class StaticModule: + def __init__(self, scripted): + # this is an nn.Module + if hasattr(scripted, "_c"): + self.static_module = torch._C._jit_to_static_module(scripted._c) + else: + self.static_module = torch._C._jit_to_static_module(scripted.graph) + + def __call__(self, *args, **kwargs): + return self.static_module(*args, **kwargs) + + def benchmark(self, args, kwargs, warmup_runs, main_runs): + self.static_module.benchmark(args, kwargs, warmup_runs, main_runs) + + def runAsync(self, args, kwargs): + return self.static_module.runAsync(args, kwargs) + + def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs): + return self.static_module.benchmark_individual_ops( + args, kwargs, warmup_runs, main_runs + ) diff --git a/valley/lib/python3.10/site-packages/torch/testing/_internal/torchbind_impls.py b/valley/lib/python3.10/site-packages/torch/testing/_internal/torchbind_impls.py new file mode 100644 index 0000000000000000000000000000000000000000..5d127a9a50c488989ac02f30255fbd06e42c4063 --- /dev/null +++ 
def init_torchbind_implementations():
    """Idempotently set up the torchbind test fixtures for this process.

    Loads the torchbind test library, registers the fake operators and
    classes, and creates the shared global tensor queue. Subsequent calls
    are no-ops.
    """
    global _TORCHBIND_IMPLS_INITIALIZED, _TENSOR_QUEUE_GLOBAL_TEST

    # Already initialized once in this process; nothing left to do.
    if _TORCHBIND_IMPLS_INITIALIZED:
        return

    load_torchbind_test_lib()
    register_fake_operators()
    register_fake_classes()
    _TENSOR_QUEUE_GLOBAL_TEST = _empty_tensor_queue()
    _TORCHBIND_IMPLS_INITIALIZED = True
@contextlib.contextmanager
def _register_py_impl_temporarily(op_overload, key, fn):
    """Register *fn* as the py_impl of *op_overload* for dispatch *key*,
    undoing the registration (and flushing the dispatch cache) on exit.
    """
    try:
        # Same decorator form used for permanent registrations.
        register = op_overload.py_impl(key)
        register(fn)
        yield
    finally:
        # Drop the kernel and invalidate any cached dispatch decisions.
        del op_overload.py_kernels[key]
        op_overload._dispatch_cache.clear()
    @triton.jit
    def add_kernel_with_optional_param(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        ARGS_PASSED: "tl.constexpr",
        BLOCK_SIZE: "tl.constexpr",
    ):
        # Elementwise kernel: writes x + y when the compile-time flag
        # ARGS_PASSED == "two", otherwise just copies x through.
        # in_ptr1 is only dereferenced on the "two" path.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        # Mask guards the tail block against out-of-bounds loads/stores.
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        if ARGS_PASSED == "two":
            y = tl.load(in_ptr1 + offsets, mask=mask)
            output = x + y
        else:
            output = x
        tl.store(out_ptr + offsets, output, mask=mask)
    @triton.jit
    def add_kernel_with_scaling(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        scaling_factor,
        BLOCK_SIZE: "tl.constexpr",
    ):
        # Elementwise out = (x + y) * scaling_factor; scaling_factor is a
        # runtime scalar argument (not a constexpr).
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        # Mask guards the tail block against out-of-bounds accesses.
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = (x + y) * scaling_factor
        tl.store(out_ptr + offsets, output, mask=mask)
    @triton.jit
    def indirection_kernel(
        in_ptr0,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
        ACTIVATION: "tl.constexpr",
    ):
        # Dispatches to another jitted kernel chosen by the compile-time
        # ACTIVATION string, then unconditionally copies in_ptr0 to out_ptr.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        if ACTIVATION == "mul2_inplace_kernel":
            # Doubles in_ptr0 in place before the copy below.
            mul2_inplace_kernel(in_ptr0, n_elements, BLOCK_SIZE=BLOCK_SIZE)
        elif ACTIVATION == "add_kernel":
            # Writes in_ptr0 + in_ptr0 into out_ptr; the unconditional
            # store below then rewrites out_ptr with the (updated) input.
            add_kernel(in_ptr0, in_ptr0, out_ptr, n_elements, BLOCK_SIZE=BLOCK_SIZE)
        x = tl.load(in_ptr0 + offsets, mask=mask)
        tl.store(out_ptr + offsets, x, mask=mask)
    @triton.jit
    def add_kernel_with_import(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        # Same elementwise add as add_kernel, but deliberately uses the
        # bare `load`/`store` names imported from triton.language above —
        # exercises name resolution of directly-imported tl functions.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        # Mask guards the tail block against out-of-bounds accesses.
        mask = offsets < n_elements
        x = load(in_ptr0 + offsets, mask=mask)
        y = load(in_ptr1 + offsets, mask=mask)
        output = x + y
        store(out_ptr + offsets, output, mask=mask)
    @triton.jit
    def atomic_add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        # Like add_kernel, but ACCUMULATES x + y into out_ptr with
        # tl.atomic_add instead of overwriting it — out_ptr must be
        # pre-initialized by the caller.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        # Mask guards the tail block against out-of-bounds accesses.
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.atomic_add(out_ptr + offsets, output, mask=mask)
utilities for use in public testing APIs. +# NB: these should all be importable without optional dependencies +# (like numpy and expecttest). + + +def wrapper_set_seed(op, *args, **kwargs): + """Wrapper to set seed manually for some functions like dropout + See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details. + """ + with freeze_rng_state(): + torch.manual_seed(42) + output = op(*args, **kwargs) + + if isinstance(output, torch.Tensor) and output.device.type == "lazy": + # We need to call mark step inside freeze_rng_state so that numerics + # match eager execution + torch._lazy.mark_step() # type: ignore[attr-defined] + + return output + + +@contextlib.contextmanager +def freeze_rng_state(): + # no_dispatch needed for test_composite_compliance + # Some OpInfos use freeze_rng_state for rng determinism, but + # test_composite_compliance overrides dispatch for all torch functions + # which we need to disable to get and set rng state + with torch.utils._mode_utils.no_dispatch(), torch._C._DisableFuncTorch(): + rng_state = torch.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + try: + yield + finally: + # Modes are not happy with torch.cuda.set_rng_state + # because it clones the state (which could produce a Tensor Subclass) + # and then grabs the new tensor's data pointer in generator.set_state. + # + # In the long run torch.cuda.set_rng_state should probably be + # an operator. + # + # NB: Mode disable is to avoid running cross-ref tests on thes seeding + with torch.utils._mode_utils.no_dispatch(), torch._C._DisableFuncTorch(): + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + torch.set_rng_state(rng_state)